```yaml
quant_stage:
  quant_modifiers:
    SmoothQuantModifier:
      smoothing_strength: 0.8
      mappings:
        - - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
          - re:.*input_layernorm
        - - ['re:.*gate_proj', 're:.*up_proj']
          - re:.*post_attention_layernorm
        - - ['re:.*down_proj']
          - re:.*up_proj
    GPTQModifier:
      ignore: [language_model.lm_head, 're:vision_tower.*', 're:multi_modal_projector.*']
      sequential_targets: [MistralDecoderLayer]
      dampening_frac: 0.01
      config_groups:
        group_0:
          targets: [Linear]
          weights: {num_bits: 8, type: int, symmetric: true, strategy: channel, observer: mse}
          input_activations: {num_bits: 8, type: int, symmetric: true, strategy: token, dynamic: true, observer: memoryless}
```
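To apply this recipe, a minimal sketch using llm-compressor's `oneshot` entrypoint is shown below, assuming the YAML above is saved as `recipe.yaml`. The checkpoint, calibration dataset, split, and sample counts are illustrative assumptions (chosen because Pixtral-style checkpoints expose the `language_model`, `vision_tower`, and `multi_modal_projector` module names this recipe references), not values taken from the recipe itself.

```python
# Minimal sketch: one-shot SmoothQuant + GPTQ calibration with llm-compressor.
# The model ID, dataset, and calibration sizes are illustrative assumptions.
import torch
from transformers import LlavaForConditionalGeneration
from llmcompressor.transformers import oneshot

# Assumed checkpoint whose module names match the recipe's ignore list
# (language_model.*, vision_tower.*, multi_modal_projector.*).
MODEL_ID = "mistral-community/pixtral-12b"

model = LlavaForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)

# Vision-language calibration samples carry image tensors, so unpack
# single-sample batches into plain tensor dicts for the forward pass.
def data_collator(batch):
    assert len(batch) == 1
    return {key: torch.tensor(value) for key, value in batch[0].items()}

oneshot(
    model=model,
    dataset="flickr30k",                   # assumed calibration dataset
    splits={"calibration": "test[:512]"},  # assumed calibration split
    recipe="recipe.yaml",                  # the recipe above, saved to disk
    max_seq_length=2048,
    num_calibration_samples=512,
    data_collator=data_collator,
)

# Save in compressed-tensors format for inference engines such as vLLM.
model.save_pretrained("pixtral-12b-w8a8", save_compressed=True)
```

The recipe itself encodes the usual W8A8 division of labor: SmoothQuant first migrates activation outliers into the preceding weights (with strength 0.8) along the listed layernorm-to-projection mappings, then GPTQ quantizes every remaining `Linear` to INT8 weights (symmetric, per-channel, MSE observer) with dynamic per-token INT8 activations, while the LM head, vision tower, and projector stay in full precision.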