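# FP8 quantization recipe: 8-bit float weights (symmetric, per-channel, MSE observer)
# and dynamic per-token 8-bit float input activations on Linear layers; lm_head,
# self-attention, router, vision-model, and multi-modal-projector modules are excluded.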
default_stage:
  default_modifiers:
    QuantizationModifier:
      config_groups:
        group_0:
          targets: [Linear]
          weights: {num_bits: 8, type: float, symmetric: true, strategy: channel, observer: mse}
          input_activations: {num_bits: 8, type: float, symmetric: true, strategy: token, dynamic: true, observer: null}
          output_activations: null
      ignore: ['re:.*lm_head', 're:.*self_attn', 're:.*router', 're:.*vision_model', 're:.*multi_modal_projector']
      targets: [Linear]
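This recipe matches the QuantizationModifier format used by llm-compressor / compressed-tensors. Below is a minimal sketch of how such a recipe might be applied data-free (dynamic FP8 activations need no calibration set); the `oneshot` entry point and `save_compressed` argument are assumed from llm-compressor's public API, and the model ID and output directory are placeholders.

```python
# Sketch: apply the recipe above with llm-compressor (API assumed; names are placeholders).
from transformers import AutoModelForCausalLM, AutoTokenizer

from llmcompressor import oneshot

MODEL_ID = "org/base-model"          # placeholder: the unquantized source model
SAVE_DIR = "base-model-FP8-dynamic"  # placeholder: output directory

# Load the full-precision model and tokenizer.
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Apply the recipe file shown above. Weights are quantized with an MSE observer and
# activations are quantized dynamically at runtime, so no calibration data is passed.
oneshot(model=model, recipe="recipe.yaml")

# Save in compressed-tensors format so the checkpoint carries the quantization config.
model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)
```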