# FP8 (E4M3-style 8-bit float) static quantization recipe for llm-compressor.
# Applies per-tensor, symmetric, static (calibrated, non-dynamic) quantization
# to Linear weights, input activations, and the KV cache.
quant_stage:
  quant_modifiers:
    QuantizationModifier:
      # Leave the LM head and vision-tower / projector modules unquantized.
      ignore: ['re:.*lm_head', 're:vision_tower.*', 're:multi_modal_projector.*']
      config_groups:
        group_0:
          # Static per-tensor FP8 weight quantization.
          weights:
            num_bits: 8
            type: float
            strategy: tensor
            dynamic: false
            symmetric: true
          # Static per-tensor FP8 input-activation quantization.
          input_activations:
            num_bits: 8
            type: float
            strategy: tensor
            dynamic: false
            symmetric: true
          # Apply this group to all Linear layers not matched by `ignore`.
          targets: [Linear]
      # Static per-tensor FP8 quantization of the KV cache
      # (sibling of config_groups, per the QuantizationModifier schema).
      kv_cache_scheme:
        num_bits: 8
        type: float
        strategy: tensor
        dynamic: false
        symmetric: true