---
# GPTQ W4A16 quantization recipe for a Mistral-based multimodal model.
quant_stage:
  quant_modifiers:
    GPTQModifier:
      # Leave the LM head and all vision-tower / projector modules unquantized.
      ignore: [language_model.lm_head, 're:vision_tower.*', 're:multi_modal_projector.*']
      # Quantize decoder layers one at a time to bound memory during calibration.
      sequential_targets: [MistralDecoderLayer]
      dampening_frac: 0.01
      config_groups:
        group0:
          targets: [Linear]
          # 4-bit int weights, symmetric, group-wise with group size 128,
          # weight-order activation reordering, minmax range observer.
          weights:
            num_bits: 4
            type: int
            strategy: group
            group_size: 128
            symmetric: true
            actorder: weight
            observer: minmax