---
# GPTQ quantization recipe: int4 grouped weight quantization (W4, group size 128)
# applied to Linear layers of the language model only.
quant_stage:
  quant_modifiers:
    GPTQModifier:
      # Modules excluded from quantization: the LM head plus all
      # vision-tower / multi-modal-projector submodules (regex-matched).
      ignore:
        - language_model.lm_head
        - 're:vision_tower.*'
        - 're:multi_modal_projector.*'
      # Layer type processed sequentially during calibration.
      sequential_targets: [MistralDecoderLayer]
      # Damping factor for the GPTQ solve — NOTE(review): presumably the
      # fraction of the mean Hessian diagonal added for numerical stability;
      # confirm against GPTQModifier docs.
      dampening_frac: 0.01
      config_groups:
        group0:
          targets: [Linear]
          weights:
            num_bits: 4
            type: int
            strategy: group
            group_size: 128
            symmetric: true
            # Activation-ordering by weight; min/max range observer.
            actorder: weight
            observer: minmax