{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 32,
  "pretraining_tp": 1,
  "quantization_config": {
    "algo_config": {
      "model_decoder_layers": "model.layers",
      "name": "awq",
      "num_attention_heads": -1,
      "num_key_value_heads": -1,
      "scaling_layers": [
        {
          "inp": "self_attn.q_proj",
          "layers": [
            "self_attn.q_proj",
            "self_attn.k_proj",
            "self_attn.v_proj"
          ],
          "module2inspect": "self_attn",
          "prev_op": "input_layernorm"
        },
        {
          "inp": "self_attn.o_proj",
          "layers": [
            "self_attn.o_proj"
          ],
          "prev_op": "self_attn.v_proj"
        },
        {
          "inp": "mlp.gate_proj",
          "layers": [
            "mlp.gate_proj",
            "mlp.up_proj"
          ],
          "module2inspect": "mlp",
          "prev_op": "post_attention_layernorm"
        },
        {
          "inp": "mlp.down_proj",
          "layers": [
            "mlp.down_proj"
          ],
          "prev_op": "mlp.up_proj"
        }
      ]
    },
    "exclude": [],
    "export": {
      "kv_cache_group": [],
      "min_kv_scale": 0.0,
      "pack_method": "reorder",
      "weight_format": "real_quantized",
      "weight_merge_groups": null
    },
    "global_quant_config": {
      "bias": null,
      "input_tensors": null,
      "output_tensors": null,
      "target_device": null,
      "weight": {
        "ch_axis": 1,
        "dtype": "uint4",
        "group_size": 128,
        "is_dynamic": false,
        "observer_cls": "PerGroupMinMaxObserver",
        "qscheme": "per_group",
        "round_method": "half_even",
        "scale_type": "float",
        "symmetric": false
      }
    },
    "layer_quant_config": {},
    "layer_type_quant_config": {},
    "quant_method": "quark",
    "quant_mode": "eager_mode",
    "softmax_quant_spec": null
  },
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.50.3",
  "use_cache": true,
  "vocab_size": 32000
}
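
For reference, here is a minimal sketch of the weight scheme that `global_quant_config.weight` describes: asymmetric (`"symmetric": false`) uint4 quantization with per-group float min/max scales (`"qscheme": "per_group"`, `"group_size": 128`, `"observer_cls": "PerGroupMinMaxObserver"`) along the input-channel axis (`"ch_axis": 1`), rounded half to even (`"round_method": "half_even"`). This is an illustration only, not Quark's API; the function name `quantize_per_group_uint4` and the random test tensor are invented for the example, and the actual AWQ scale search over the `scaling_layers` entries is done by the Quark/AWQ tooling, not here.

```python
import torch

def quantize_per_group_uint4(w: torch.Tensor, group_size: int = 128):
    """Illustrative per-group asymmetric uint4 quantization (hypothetical helper,
    not Quark's API). Follows the spec in global_quant_config.weight: groups of
    128 values along ch_axis=1, float scales, min/max observer, half-even rounding.
    """
    out_ch, in_ch = w.shape
    assert in_ch % group_size == 0
    g = w.reshape(out_ch, in_ch // group_size, group_size)
    wmin = g.amin(dim=-1, keepdim=True)
    wmax = g.amax(dim=-1, keepdim=True)
    scale = (wmax - wmin).clamp(min=1e-8) / 15.0     # uint4 covers 0..15
    zero_point = torch.round(-wmin / scale)          # asymmetric: nonzero offset
    # torch.round uses round-half-to-even, matching "round_method": "half_even".
    q = torch.clamp(torch.round(g / scale + zero_point), 0, 15)
    dq = (q - zero_point) * scale                    # dequantized approximation
    return (q.to(torch.uint8).reshape(out_ch, in_ch),
            scale, zero_point, dq.reshape(out_ch, in_ch))

# Example: quantize a 4096 x 11008 projection shaped like mlp.gate_proj above.
w = torch.randn(4096, 11008)
q, scale, zp, dq = quantize_per_group_uint4(w)
print(int(q.min()), int(q.max()))        # stays within [0, 15]
print((w - dq).abs().max().item())       # per-group quantization error
```

With `"weight_format": "real_quantized"` and `"pack_method": "reorder"`, the checkpoint stores the packed integer weights plus their scales and zero points rather than fake-quantized floats, so a loader must dequantize (as `dq` does above) or run integer kernels directly.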