wuzhiying committed
Commit ddad89a
Parent(s): 28d985c

fix BitsAndBytesConfig 8bit load issues

Files changed (1)
  1. modeling_baichuan.py +2 -1
modeling_baichuan.py CHANGED
@@ -528,7 +528,8 @@ class BaichuanForCausalLM(BaichuanPreTrainedModel):
         self.model = BaichuanModel(config)
 
         self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)
-        if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']:
+        #if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']:
+        if hasattr(config, "quantization_config") and isinstance(config.quantization_config, dict) and config.quantization_config.get('load_in_4bit', False):
             try:
                 from .quantizer import quantize_offline, init_model_weight_int4
             except ImportError:
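
Why the guard was tightened: when a model is loaded with an 8-bit BitsAndBytesConfig, config.quantization_config may arrive as a config object rather than a plain dict, so the old subscript config.quantization_config['load_in_4bit'] can raise instead of simply evaluating to False. The sketch below contrasts the old and new checks; FakeQuantizationConfig is a hypothetical stand-in for a non-dict quantization config used only for illustration, not a class from this repository or from transformers.

from types import SimpleNamespace

class FakeQuantizationConfig:
    # Hypothetical stand-in for an object-style quantization config
    # (e.g. what an 8-bit load might attach); it is not subscriptable.
    load_in_8bit = True
    load_in_4bit = False

def old_check(config):
    # Old guard: assumes quantization_config is always a dict.
    return hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']

def new_check(config):
    # New guard: only take the offline int4 path when quantization_config
    # is a dict that explicitly requests load_in_4bit.
    return (hasattr(config, "quantization_config")
            and isinstance(config.quantization_config, dict)
            and config.quantization_config.get('load_in_4bit', False))

dict_cfg = SimpleNamespace(quantization_config={'load_in_4bit': True})
obj_cfg = SimpleNamespace(quantization_config=FakeQuantizationConfig())

print(new_check(dict_cfg))  # True  -> int4 offline path is taken
print(new_check(obj_cfg))   # False -> falls through to the normal 8-bit load

try:
    old_check(obj_cfg)      # raises: object is not subscriptable
except TypeError as e:
    print("old check fails:", e)

With the new check, an 8-bit load no longer trips over the 4-bit-only branch, which matches the commit message above.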