diff --git a/.gitattributes b/.gitattributes
index 75382f2fa883c4b4046312522d5dfbea76c904c7..551fa4166315ed87a4d932bf8465ff8e333bce1a 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -42,14 +42,3 @@ DeepSeek-R1-Distill-Qwen-14B_3bit/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 DeepSeek-R1-Distill-Qwen-14B_6bit/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-float16/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-4bit/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-6bit/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-bfloat16/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-3bit/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-8bit/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/tokenizer.json filter=lfs diff=lfs merge=lfs -text
-DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/config.json b/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/config.json
deleted file mode 100644
index c666d679ba14c74ddbb4b359e3f39bb19a44c5e8..0000000000000000000000000000000000000000
--- a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/config.json
+++ /dev/null
@@ -1,3029 +0,0 @@
-{
-  "architectures": [
-    "Qwen2ForCausalLM"
-  ],
-  "attention_dropout": 0.0,
-  "bos_token_id": 151643,
-  "eos_token_id": 151643,
-  "hidden_act": "silu",
-  "hidden_size": 5120,
-  "initializer_range": 0.02,
-  "intermediate_size": 13824,
-  "max_position_embeddings": 131072,
-  "max_window_layers": 48,
-  "model_type": "qwen2",
-  "num_attention_heads": 40,
-  "num_hidden_layers": 48,
-  "num_key_value_heads": 8,
-  "quantization": {
-    "group_size": 64,
-    "bits": null,
-    "model.embed_tokens": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.0.self_attn.q_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.0.self_attn.k_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.0.self_attn.v_proj": {
-      "group_size": 64,
-      "bits": 4
-    },
-    "model.layers.0.self_attn.o_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.0.self_attn.rope": false,
-    "model.layers.0.mlp.gate_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.0.mlp.down_proj": {
-      "group_size": 64,
-      "bits": 4
-    },
-    "model.layers.0.mlp.up_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.0.input_layernorm": false,
-    "model.layers.0.post_attention_layernorm": false,
-    "model.layers.1.self_attn.q_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.1.self_attn.k_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.1.self_attn.v_proj": {
-      "group_size": 64,
-      "bits": 4
-    },
-    "model.layers.1.self_attn.o_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.1.self_attn.rope": false,
-    "model.layers.1.mlp.gate_proj": {
-      "group_size": 64,
-      "bits": 3
-    },
-    "model.layers.1.mlp.down_proj": {
-      "group_size": 64,
-      "bits": 4
-    },
"model.layers.1.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.1.input_layernorm": false, - "model.layers.1.post_attention_layernorm": false, - "model.layers.2.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.2.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.self_attn.rope": false, - "model.layers.2.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.2.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.input_layernorm": false, - "model.layers.2.post_attention_layernorm": false, - "model.layers.3.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.3.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.self_attn.rope": false, - "model.layers.3.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.3.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.input_layernorm": false, - "model.layers.3.post_attention_layernorm": false, - "model.layers.4.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.4.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.self_attn.rope": false, - "model.layers.4.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.4.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.input_layernorm": false, - "model.layers.4.post_attention_layernorm": false, - "model.layers.5.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.5.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.self_attn.rope": false, - "model.layers.5.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.5.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.input_layernorm": false, - "model.layers.5.post_attention_layernorm": false, - "model.layers.6.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.rope": false, - "model.layers.6.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.input_layernorm": false, - "model.layers.6.post_attention_layernorm": false, - "model.layers.7.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.7.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.self_attn.rope": false, - "model.layers.7.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.input_layernorm": false, - "model.layers.7.post_attention_layernorm": false, - "model.layers.8.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.8.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.self_attn.rope": false, - "model.layers.8.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.8.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.input_layernorm": false, - "model.layers.8.post_attention_layernorm": false, - "model.layers.9.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.rope": false, - "model.layers.9.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.input_layernorm": false, - "model.layers.9.post_attention_layernorm": false, - "model.layers.10.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.rope": false, - "model.layers.10.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.input_layernorm": false, - "model.layers.10.post_attention_layernorm": false, - "model.layers.11.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.11.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.self_attn.rope": false, - "model.layers.11.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.11.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.input_layernorm": false, - "model.layers.11.post_attention_layernorm": false, - "model.layers.12.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.12.self_attn.rope": false, - "model.layers.12.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.input_layernorm": false, - "model.layers.12.post_attention_layernorm": false, - "model.layers.13.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.rope": false, - "model.layers.13.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.input_layernorm": false, - "model.layers.13.post_attention_layernorm": false, - "model.layers.14.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.14.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.self_attn.rope": false, - "model.layers.14.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.14.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.input_layernorm": false, - "model.layers.14.post_attention_layernorm": false, - "model.layers.15.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.rope": false, - "model.layers.15.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.input_layernorm": false, - "model.layers.15.post_attention_layernorm": false, - "model.layers.16.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.rope": false, - "model.layers.16.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.input_layernorm": false, - "model.layers.16.post_attention_layernorm": false, - "model.layers.17.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.17.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.self_attn.rope": false, - "model.layers.17.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - 
"model.layers.17.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.input_layernorm": false, - "model.layers.17.post_attention_layernorm": false, - "model.layers.18.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.rope": false, - "model.layers.18.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.input_layernorm": false, - "model.layers.18.post_attention_layernorm": false, - "model.layers.19.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.rope": false, - "model.layers.19.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.input_layernorm": false, - "model.layers.19.post_attention_layernorm": false, - "model.layers.20.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.20.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.self_attn.rope": false, - "model.layers.20.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.20.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.input_layernorm": false, - "model.layers.20.post_attention_layernorm": false, - "model.layers.21.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.rope": false, - "model.layers.21.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.input_layernorm": false, - "model.layers.21.post_attention_layernorm": false, - "model.layers.22.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.rope": false, - "model.layers.22.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.input_layernorm": false, - "model.layers.22.post_attention_layernorm": false, - "model.layers.23.self_attn.q_proj": { - 
"group_size": 64, - "bits": 3 - }, - "model.layers.23.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.23.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.self_attn.rope": false, - "model.layers.23.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.23.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.input_layernorm": false, - "model.layers.23.post_attention_layernorm": false, - "model.layers.24.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.rope": false, - "model.layers.24.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.input_layernorm": false, - "model.layers.24.post_attention_layernorm": false, - "model.layers.25.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.rope": false, - "model.layers.25.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.input_layernorm": false, - "model.layers.25.post_attention_layernorm": false, - "model.layers.26.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.26.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.self_attn.rope": false, - "model.layers.26.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.26.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.input_layernorm": false, - "model.layers.26.post_attention_layernorm": false, - "model.layers.27.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.rope": false, - "model.layers.27.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.input_layernorm": false, - "model.layers.27.post_attention_layernorm": false, - "model.layers.28.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.28.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.self_attn.rope": false, - "model.layers.28.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.input_layernorm": false, - "model.layers.28.post_attention_layernorm": false, - "model.layers.29.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.29.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.self_attn.rope": false, - "model.layers.29.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.29.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.input_layernorm": false, - "model.layers.29.post_attention_layernorm": false, - "model.layers.30.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.rope": false, - "model.layers.30.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.input_layernorm": false, - "model.layers.30.post_attention_layernorm": false, - "model.layers.31.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.rope": false, - "model.layers.31.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.input_layernorm": false, - "model.layers.31.post_attention_layernorm": false, - "model.layers.32.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.32.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.self_attn.rope": false, - "model.layers.32.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.32.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.input_layernorm": false, - "model.layers.32.post_attention_layernorm": false, - "model.layers.33.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.self_attn.rope": false, - "model.layers.33.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.33.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.input_layernorm": false, - "model.layers.33.post_attention_layernorm": false, - "model.layers.34.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.rope": false, - "model.layers.34.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.input_layernorm": false, - "model.layers.34.post_attention_layernorm": false, - "model.layers.35.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.35.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.self_attn.rope": false, - "model.layers.35.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.35.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.input_layernorm": false, - "model.layers.35.post_attention_layernorm": false, - "model.layers.36.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.rope": false, - "model.layers.36.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.input_layernorm": false, - "model.layers.36.post_attention_layernorm": false, - "model.layers.37.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.rope": false, - "model.layers.37.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.input_layernorm": false, - "model.layers.37.post_attention_layernorm": false, - "model.layers.38.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.38.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.self_attn.rope": false, - "model.layers.38.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.38.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.input_layernorm": false, - 
"model.layers.38.post_attention_layernorm": false, - "model.layers.39.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.rope": false, - "model.layers.39.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.input_layernorm": false, - "model.layers.39.post_attention_layernorm": false, - "model.layers.40.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.rope": false, - "model.layers.40.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.input_layernorm": false, - "model.layers.40.post_attention_layernorm": false, - "model.layers.41.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.41.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.self_attn.rope": false, - "model.layers.41.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.41.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.input_layernorm": false, - "model.layers.41.post_attention_layernorm": false, - "model.layers.42.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.42.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.self_attn.rope": false, - "model.layers.42.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.42.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.input_layernorm": false, - "model.layers.42.post_attention_layernorm": false, - "model.layers.43.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.43.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.self_attn.rope": false, - "model.layers.43.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.43.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.input_layernorm": false, - "model.layers.43.post_attention_layernorm": false, - "model.layers.44.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.44.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.44.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.self_attn.rope": false, - "model.layers.44.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.44.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.input_layernorm": false, - "model.layers.44.post_attention_layernorm": false, - "model.layers.45.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.45.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.self_attn.rope": false, - "model.layers.45.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.45.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.input_layernorm": false, - "model.layers.45.post_attention_layernorm": false, - "model.layers.46.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.46.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.self_attn.rope": false, - "model.layers.46.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.46.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.input_layernorm": false, - "model.layers.46.post_attention_layernorm": false, - "model.layers.47.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.47.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.self_attn.rope": false, - "model.layers.47.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.47.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.input_layernorm": false, - "model.layers.47.post_attention_layernorm": false, - "model.norm": false, - "lm_head": { - "group_size": 64, - "bits": 4 - } - }, - "quantization_config": { - "group_size": 64, - "bits": null, - "model.embed_tokens": { - "group_size": 64, - "bits": 3 - }, - "model.layers.0.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.0.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.0.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.0.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.0.self_attn.rope": false, - "model.layers.0.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.0.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.0.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.0.input_layernorm": false, - "model.layers.0.post_attention_layernorm": false, - "model.layers.1.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.1.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.1.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.1.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.1.self_attn.rope": false, - "model.layers.1.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.1.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.1.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.1.input_layernorm": false, - "model.layers.1.post_attention_layernorm": false, - "model.layers.2.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.2.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.self_attn.rope": false, - "model.layers.2.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.2.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.2.input_layernorm": false, - "model.layers.2.post_attention_layernorm": false, - "model.layers.3.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.3.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.self_attn.rope": false, - "model.layers.3.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.3.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.3.input_layernorm": false, - "model.layers.3.post_attention_layernorm": false, - "model.layers.4.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.4.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.self_attn.rope": false, - "model.layers.4.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.4.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.4.input_layernorm": false, - "model.layers.4.post_attention_layernorm": false, - "model.layers.5.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.5.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.self_attn.rope": false, - "model.layers.5.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.5.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.5.input_layernorm": false, - "model.layers.5.post_attention_layernorm": false, - "model.layers.6.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.self_attn.rope": false, - "model.layers.6.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, 
- "model.layers.6.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.6.input_layernorm": false, - "model.layers.6.post_attention_layernorm": false, - "model.layers.7.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.self_attn.rope": false, - "model.layers.7.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.7.input_layernorm": false, - "model.layers.7.post_attention_layernorm": false, - "model.layers.8.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.8.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.self_attn.rope": false, - "model.layers.8.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.8.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.8.input_layernorm": false, - "model.layers.8.post_attention_layernorm": false, - "model.layers.9.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.self_attn.rope": false, - "model.layers.9.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.9.input_layernorm": false, - "model.layers.9.post_attention_layernorm": false, - "model.layers.10.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.self_attn.rope": false, - "model.layers.10.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.10.input_layernorm": false, - "model.layers.10.post_attention_layernorm": false, - "model.layers.11.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.11.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.self_attn.rope": false, - "model.layers.11.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.11.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.11.input_layernorm": false, - "model.layers.11.post_attention_layernorm": false, - 
"model.layers.12.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.self_attn.rope": false, - "model.layers.12.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.12.input_layernorm": false, - "model.layers.12.post_attention_layernorm": false, - "model.layers.13.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.self_attn.rope": false, - "model.layers.13.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.13.input_layernorm": false, - "model.layers.13.post_attention_layernorm": false, - "model.layers.14.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.14.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.self_attn.rope": false, - "model.layers.14.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.14.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.14.input_layernorm": false, - "model.layers.14.post_attention_layernorm": false, - "model.layers.15.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.self_attn.rope": false, - "model.layers.15.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.15.input_layernorm": false, - "model.layers.15.post_attention_layernorm": false, - "model.layers.16.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.self_attn.rope": false, - "model.layers.16.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.16.input_layernorm": false, - "model.layers.16.post_attention_layernorm": false, - "model.layers.17.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.self_attn.v_proj": { - "group_size": 64, - 
"bits": 4 - }, - "model.layers.17.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.self_attn.rope": false, - "model.layers.17.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.17.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.17.input_layernorm": false, - "model.layers.17.post_attention_layernorm": false, - "model.layers.18.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.self_attn.rope": false, - "model.layers.18.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.18.input_layernorm": false, - "model.layers.18.post_attention_layernorm": false, - "model.layers.19.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.self_attn.rope": false, - "model.layers.19.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.19.input_layernorm": false, - "model.layers.19.post_attention_layernorm": false, - "model.layers.20.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.20.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.self_attn.rope": false, - "model.layers.20.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.20.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.20.input_layernorm": false, - "model.layers.20.post_attention_layernorm": false, - "model.layers.21.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.self_attn.rope": false, - "model.layers.21.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.21.input_layernorm": false, - "model.layers.21.post_attention_layernorm": false, - "model.layers.22.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.self_attn.rope": false, - "model.layers.22.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.22.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.22.input_layernorm": false, - "model.layers.22.post_attention_layernorm": false, - "model.layers.23.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.23.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.self_attn.rope": false, - "model.layers.23.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.23.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.23.input_layernorm": false, - "model.layers.23.post_attention_layernorm": false, - "model.layers.24.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.self_attn.rope": false, - "model.layers.24.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.24.input_layernorm": false, - "model.layers.24.post_attention_layernorm": false, - "model.layers.25.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.self_attn.rope": false, - "model.layers.25.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.25.input_layernorm": false, - "model.layers.25.post_attention_layernorm": false, - "model.layers.26.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.26.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.self_attn.rope": false, - "model.layers.26.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.26.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.26.input_layernorm": false, - "model.layers.26.post_attention_layernorm": false, - "model.layers.27.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.self_attn.rope": false, - "model.layers.27.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.27.input_layernorm": false, - 
"model.layers.27.post_attention_layernorm": false, - "model.layers.28.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.self_attn.rope": false, - "model.layers.28.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.28.input_layernorm": false, - "model.layers.28.post_attention_layernorm": false, - "model.layers.29.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.29.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.self_attn.rope": false, - "model.layers.29.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.29.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.29.input_layernorm": false, - "model.layers.29.post_attention_layernorm": false, - "model.layers.30.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.self_attn.rope": false, - "model.layers.30.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.30.input_layernorm": false, - "model.layers.30.post_attention_layernorm": false, - "model.layers.31.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.self_attn.rope": false, - "model.layers.31.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.31.input_layernorm": false, - "model.layers.31.post_attention_layernorm": false, - "model.layers.32.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.32.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.self_attn.rope": false, - "model.layers.32.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.32.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.32.input_layernorm": false, - "model.layers.32.post_attention_layernorm": false, - "model.layers.33.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - 
"model.layers.33.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.self_attn.rope": false, - "model.layers.33.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.33.input_layernorm": false, - "model.layers.33.post_attention_layernorm": false, - "model.layers.34.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.self_attn.rope": false, - "model.layers.34.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.34.input_layernorm": false, - "model.layers.34.post_attention_layernorm": false, - "model.layers.35.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.35.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.self_attn.rope": false, - "model.layers.35.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.35.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.35.input_layernorm": false, - "model.layers.35.post_attention_layernorm": false, - "model.layers.36.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.self_attn.rope": false, - "model.layers.36.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.36.input_layernorm": false, - "model.layers.36.post_attention_layernorm": false, - "model.layers.37.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.self_attn.rope": false, - "model.layers.37.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.37.input_layernorm": false, - "model.layers.37.post_attention_layernorm": false, - "model.layers.38.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.38.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.self_attn.rope": false, - 
"model.layers.38.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.38.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.38.input_layernorm": false, - "model.layers.38.post_attention_layernorm": false, - "model.layers.39.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.self_attn.rope": false, - "model.layers.39.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.39.input_layernorm": false, - "model.layers.39.post_attention_layernorm": false, - "model.layers.40.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.v_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.self_attn.rope": false, - "model.layers.40.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.mlp.down_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.40.input_layernorm": false, - "model.layers.40.post_attention_layernorm": false, - "model.layers.41.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.41.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.self_attn.rope": false, - "model.layers.41.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.41.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.41.input_layernorm": false, - "model.layers.41.post_attention_layernorm": false, - "model.layers.42.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.42.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.self_attn.rope": false, - "model.layers.42.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.42.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.42.input_layernorm": false, - "model.layers.42.post_attention_layernorm": false, - "model.layers.43.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.43.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.self_attn.rope": false, - "model.layers.43.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.43.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.43.mlp.up_proj": { - "group_size": 64, - "bits": 3 - 
}, - "model.layers.43.input_layernorm": false, - "model.layers.43.post_attention_layernorm": false, - "model.layers.44.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.44.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.self_attn.rope": false, - "model.layers.44.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.44.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.44.input_layernorm": false, - "model.layers.44.post_attention_layernorm": false, - "model.layers.45.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.45.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.self_attn.rope": false, - "model.layers.45.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.45.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.45.input_layernorm": false, - "model.layers.45.post_attention_layernorm": false, - "model.layers.46.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.46.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.self_attn.rope": false, - "model.layers.46.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.46.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.46.input_layernorm": false, - "model.layers.46.post_attention_layernorm": false, - "model.layers.47.self_attn.q_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.self_attn.k_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.self_attn.v_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.47.self_attn.o_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.self_attn.rope": false, - "model.layers.47.mlp.gate_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.mlp.down_proj": { - "group_size": 64, - "bits": 4 - }, - "model.layers.47.mlp.up_proj": { - "group_size": 64, - "bits": 3 - }, - "model.layers.47.input_layernorm": false, - "model.layers.47.post_attention_layernorm": false, - "model.norm": false, - "lm_head": { - "group_size": 64, - "bits": 4 - } - }, - "rms_norm_eps": 1e-05, - "rope_theta": 1000000.0, - "sliding_window": 131072, - "tie_word_embeddings": false, - "torch_dtype": "bfloat16", - "transformers_version": "4.43.1", - "use_cache": true, - "use_sliding_window": false, - "vocab_size": 152064 -} \ No newline at end of file diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model-00001-of-00002.safetensors deleted file mode 100644 index 4331a3a393861c10484b4bf8f4859f9fd59a8a17..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model-00001-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:ccdd1c75fe542d1c9b5ded243fd1ae6c627c852e07f466d67181991d448555ea -size 5348117358 diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model-00002-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model-00002-of-00002.safetensors deleted file mode 100644 index 13d910198e7385862eec9e58a46099ea4004080e..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model-00002-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:744229f4c2a22a1301aa5a3d582efa7ba228c9b460eaf47b2dd85b8e7a57fb24 -size 1440613893 diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model.safetensors.index.json deleted file mode 100644 index f1879fb354a8c0025f91d3f91d129b0ccdcdb1c9..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/model.safetensors.index.json +++ /dev/null @@ -1,1262 +0,0 @@ -{ - "metadata": { - "total_size": 6788589568 - }, - "weight_map": { - "lm_head.biases": "model-00002-of-00002.safetensors", - "lm_head.scales": "model-00002-of-00002.safetensors", - "lm_head.weight": "model-00002-of-00002.safetensors", - "model.embed_tokens.biases": "model-00001-of-00002.safetensors", - "model.embed_tokens.scales": "model-00001-of-00002.safetensors", - "model.embed_tokens.weight": "model-00001-of-00002.safetensors", - "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.input_layernorm.weight": 
"model-00001-of-00002.safetensors", - "model.layers.1.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - 
"model.layers.10.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.bias": 
"model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors", 
- "model.layers.14.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.bias": 
"model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - 
"model.layers.17.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.up_proj.biases": 
"model-00001-of-00002.safetensors", - "model.layers.19.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - 
"model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.bias": 
"model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - 
"model.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - 
"model.layers.25.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.weight": 
"model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - 
"model.layers.28.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.gate_proj.weight": 
"model-00001-of-00002.safetensors", - "model.layers.3.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - 
"model.layers.30.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.32.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.32.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.32.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.o_proj.weight": 
"model-00001-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.33.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.33.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.33.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.34.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.34.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - 
"model.layers.34.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.35.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.35.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.35.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.36.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - 
"model.layers.36.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.36.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.36.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.37.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.37.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.37.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.scales": 
"model-00001-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.37.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.38.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.38.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.38.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.39.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.39.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.39.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - 
"model.layers.39.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.40.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.gate_proj.scales": 
"model-00002-of-00002.safetensors", - "model.layers.40.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.41.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - 
"model.layers.41.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.o_proj.scales": 
"model-00002-of-00002.safetensors", - "model.layers.43.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - 
"model.layers.45.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.input_layernorm.weight": "model-00002-of-00002.safetensors", - 
"model.layers.47.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.biases": 
"model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.biases": 
"model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.gate_proj.scales": 
"model-00001-of-00002.safetensors", - "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.norm.weight": "model-00002-of-00002.safetensors" - } -} \ No newline at end of file diff --git a/DeepSeek-R1-Distill-Qwen-14B-4bit/config.json b/DeepSeek-R1-Distill-Qwen-14B-4bit/config.json deleted file mode 100644 index 1c72c7702157d4588fb410677d9c819cdda200d9..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-4bit/config.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "architectures": [ - "Qwen2ForCausalLM" - ], - "attention_dropout": 0.0, - "bos_token_id": 151643, - "eos_token_id": 151643, - "hidden_act": "silu", - "hidden_size": 5120, - "initializer_range": 0.02, - "intermediate_size": 13824, - "max_position_embeddings": 131072, - "max_window_layers": 48, - "model_type": "qwen2", - "num_attention_heads": 40, - "num_hidden_layers": 48, - "num_key_value_heads": 8, - "quantization": { - "group_size": 64, - "bits": 4 - }, - "quantization_config": { - "group_size": 64, - "bits": 4 - }, - "rms_norm_eps": 1e-05, - "rope_theta": 1000000.0, - "sliding_window": 131072, - "tie_word_embeddings": false, - "torch_dtype": "bfloat16", - "transformers_version": "4.43.1", - "use_cache": true, - "use_sliding_window": false, - "vocab_size": 152064 -} \ No newline at end of file diff --git a/DeepSeek-R1-Distill-Qwen-14B-4bit/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B-4bit/model-00001-of-00002.safetensors deleted file mode 100644 index eb90bfb2dba5d0775797b57910394c1da6d13f7e..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-4bit/model-00001-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:561eacdd7d4d652063a099fb0b6b5340f9e3f82263f7dca70fcf9e026f5860e9 -size 5353840339 diff --git a/DeepSeek-R1-Distill-Qwen-14B-4bit/model-00002-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B-4bit/model-00002-of-00002.safetensors deleted file mode 100644 index 
ce5df2e9b1c19fa12eb956c320dd4d32b4be5806..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-4bit/model-00002-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:09ec75fe3ad8aa351fce70d1184625b275d4ba578212951e8e4f1883fcc92ed3 -size 2955654022 diff --git a/DeepSeek-R1-Distill-Qwen-14B-4bit/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B-4bit/model.safetensors.index.json deleted file mode 100644 index 7b762296a5ee48da56f5585b9c0e23ecf5c42cb8..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-4bit/model.safetensors.index.json +++ /dev/null @@ -1,1262 +0,0 @@ -{ - "metadata": { - "total_size": 8309352448 - }, - "weight_map": { - "lm_head.biases": "model-00002-of-00002.safetensors", - "lm_head.scales": "model-00002-of-00002.safetensors", - "lm_head.weight": "model-00002-of-00002.safetensors", - "model.embed_tokens.biases": "model-00001-of-00002.safetensors", - "model.embed_tokens.scales": "model-00001-of-00002.safetensors", - "model.embed_tokens.weight": "model-00001-of-00002.safetensors", - "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", 
- "model.layers.1.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.biases": 
"model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - 
"model.layers.12.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.mlp.up_proj.scales": 
"model-00001-of-00002.safetensors", - "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.input_layernorm.weight": 
"model-00001-of-00002.safetensors", - "model.layers.16.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - 
"model.layers.17.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.bias": 
"model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - 
"model.layers.20.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.bias": 
"model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - 
"model.layers.23.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.up_proj.biases": 
"model-00001-of-00002.safetensors", - "model.layers.25.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.26.self_attn.v_proj.weight": 
"model-00001-of-00002.safetensors", - "model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.27.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - 
"model.layers.28.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.28.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.29.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.post_attention_layernorm.weight": 
"model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.30.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.31.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - 
"model.layers.31.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.31.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.31.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.q_proj.weight": 
"model-00002-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - 
"model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.36.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.gate_proj.weight": 
"model-00002-of-00002.safetensors", - "model.layers.36.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.36.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.36.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.36.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.37.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.37.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.37.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.37.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", 
- "model.layers.37.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.38.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.38.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.38.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.38.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.39.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.39.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.39.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.o_proj.weight": 
"model-00002-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.39.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.40.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - 
"model.layers.40.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.40.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.41.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - 
"model.layers.42.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.42.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.scales": 
"model-00002-of-00002.safetensors", - "model.layers.43.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.43.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.44.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - 
"model.layers.45.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.45.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.46.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.input_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.down_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.down_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.down_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.gate_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.gate_proj.scales": 
"model-00002-of-00002.safetensors", - "model.layers.47.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.up_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.up_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.mlp.up_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.o_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.biases": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", - "model.layers.47.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", - "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - 
"model.layers.5.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - 
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.down_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.down_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.gate_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.up_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.up_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors", - 
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.o_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.biases": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", - "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", - "model.norm.weight": "model-00002-of-00002.safetensors" - } -} \ No newline at end of file diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B-bfloat16/special_tokens_map.json deleted file mode 100644 index 1d385d62cf08bca35254547902b792c243656ec1..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/special_tokens_map.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "bos_token": { - "content": "<|begin▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - "eos_token": { - "content": "<|end▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - "pad_token": { - "content": "<|end▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } -} diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B-bfloat16/tokenizer.json deleted file mode 100644 index 1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/tokenizer.json +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893 -size 11422778 diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B-bfloat16/tokenizer_config.json deleted file mode 100644 index ef6e98c3e0446cad00c5e6fb6bf2f5bbaf2eb0bd..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/tokenizer_config.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "add_bos_token": true, - "add_eos_token": false, - "add_prefix_space": null, - "added_tokens_decoder": { - "151643": { - "content": "<|end▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151644": { - "content": "<|User|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151645": { - "content": "<|Assistant|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151646": { - 
"content": "<|begin▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151647": { - "content": "<|EOT|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151648": { - "content": "<think>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151649": { - "content": "</think>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151650": { - "content": "<|quad_start|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151651": { - "content": "<|quad_end|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151652": { - "content": "<|vision_start|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151653": { - "content": "<|vision_end|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151654": { - "content": "<|vision_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151655": { - "content": "<|image_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151656": { - "content": "<|video_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151657": { - "content": "<tool_call>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151658": { - "content": "</tool_call>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151659": { - "content": "<|fim_prefix|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151660": { - "content": "<|fim_middle|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151661": { - "content": "<|fim_suffix|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151662": { - "content": "<|fim_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151663": { - "content": "<|repo_name|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151664": { - "content": "<|file_sep|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - } - }, - "bos_token": "<|begin▁of▁sentence|>", - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for 
tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}", - "clean_up_tokenization_spaces": false, - "eos_token": "<|end▁of▁sentence|>", - "extra_special_tokens": {}, - "legacy": true, - "model_max_length": 16384, - "pad_token": "<|end▁of▁sentence|>", - "sp_model_kwargs": {}, - "tokenizer_class": "LlamaTokenizerFast", - "unk_token": null, - "use_default_system_prompt": false -} diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B-float16/special_tokens_map.json deleted file mode 100644 index 1d385d62cf08bca35254547902b792c243656ec1..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-float16/special_tokens_map.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "bos_token": { - "content": "<|begin▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - "eos_token": { - "content": "<|end▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - "pad_token": { - "content": "<|end▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } -} diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B-float16/tokenizer.json deleted file mode 100644 index 1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-float16/tokenizer.json +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893 -size 11422778 diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B-float16/tokenizer_config.json deleted file mode 100644 index ef6e98c3e0446cad00c5e6fb6bf2f5bbaf2eb0bd..0000000000000000000000000000000000000000 --- a/DeepSeek-R1-Distill-Qwen-14B-float16/tokenizer_config.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "add_bos_token": true, - "add_eos_token": false, - "add_prefix_space": null, - "added_tokens_decoder": { - "151643": { - "content": "<|end▁of▁sentence|>", 
- "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151644": { - "content": "<|User|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151645": { - "content": "<|Assistant|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151646": { - "content": "<|begin▁of▁sentence|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151647": { - "content": "<|EOT|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151648": { - "content": "<think>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151649": { - "content": "</think>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151650": { - "content": "<|quad_start|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151651": { - "content": "<|quad_end|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151652": { - "content": "<|vision_start|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151653": { - "content": "<|vision_end|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151654": { - "content": "<|vision_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151655": { - "content": "<|image_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151656": { - "content": "<|video_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "151657": { - "content": "<tool_call>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151658": { - "content": "</tool_call>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151659": { - "content": "<|fim_prefix|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151660": { - "content": "<|fim_middle|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151661": { - "content": "<|fim_suffix|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151662": { - "content": "<|fim_pad|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151663": { - "content": "<|repo_name|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - }, - "151664": { - "content": "<|file_sep|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": false - } - }, - "bos_token": "<|begin▁of▁sentence|>", - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') 
%}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}", - "clean_up_tokenization_spaces": false, - "eos_token": "<|end▁of▁sentence|>", - "extra_special_tokens": {}, - "legacy": true, - "model_max_length": 16384, - "pad_token": "<|end▁of▁sentence|>", - "sp_model_kwargs": {}, - "tokenizer_class": "LlamaTokenizerFast", - "unk_token": null, - "use_default_system_prompt": false -} diff --git a/DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/config.json b/DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/config.json rename to DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/config.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/model.safetensors b/DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/model.safetensors similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/model.safetensors rename to DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/model.safetensors diff --git a/DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/model.safetensors.index.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/model.safetensors.index.json rename to DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/model.safetensors.index.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/special_tokens_map.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/special_tokens_map.json rename to DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/special_tokens_map.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/tokenizer.json 
similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/tokenizer.json rename to DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/tokenizer.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/tokenizer_config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-2,6_mixed/tokenizer_config.json rename to DeepSeek-R1-Distill-Qwen-14B_2,6_mixed/tokenizer_config.json diff --git a/DeepSeek-R1-Distill-Qwen-14B_3,4_mixed/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_3,4_mixed/model-00001-of-00002.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/config.json b/DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/config.json rename to DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/config.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/model-00001-of-00002.safetensors similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/model-00001-of-00002.safetensors rename to DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/model-00001-of-00002.safetensors diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/model-00002-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/model-00002-of-00002.safetensors similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/model-00002-of-00002.safetensors rename to DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/model-00002-of-00002.safetensors diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/model.safetensors.index.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/model.safetensors.index.json rename to DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/model.safetensors.index.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/special_tokens_map.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/special_tokens_map.json rename to DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/special_tokens_map.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/tokenizer.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/tokenizer.json rename to DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/tokenizer.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/tokenizer_config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,4_mixed/tokenizer_config.json rename to DeepSeek-R1-Distill-Qwen-14B_3,6_mixed/tokenizer_config.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3bit/config.json b/DeepSeek-R1-Distill-Qwen-14B_3bit/config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3bit/config.json rename to DeepSeek-R1-Distill-Qwen-14B_3bit/config.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3bit/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_3bit/model-00001-of-00002.safetensors similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3bit/model-00001-of-00002.safetensors rename to DeepSeek-R1-Distill-Qwen-14B_3bit/model-00001-of-00002.safetensors diff --git a/DeepSeek-R1-Distill-Qwen-14B-3bit/model-00002-of-00002.safetensors 
b/DeepSeek-R1-Distill-Qwen-14B_3bit/model-00002-of-00002.safetensors similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3bit/model-00002-of-00002.safetensors rename to DeepSeek-R1-Distill-Qwen-14B_3bit/model-00002-of-00002.safetensors diff --git a/DeepSeek-R1-Distill-Qwen-14B-3bit/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_3bit/model.safetensors.index.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3bit/model.safetensors.index.json rename to DeepSeek-R1-Distill-Qwen-14B_3bit/model.safetensors.index.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_3bit/special_tokens_map.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/special_tokens_map.json rename to DeepSeek-R1-Distill-Qwen-14B_3bit/special_tokens_map.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_3bit/tokenizer.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/tokenizer.json rename to DeepSeek-R1-Distill-Qwen-14B_3bit/tokenizer.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_3bit/tokenizer_config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3,6_mixed/tokenizer_config.json rename to DeepSeek-R1-Distill-Qwen-14B_3bit/tokenizer_config.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/config.json b/DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/config.json rename to DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/config.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/model-00001-of-00002.safetensors similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/model-00001-of-00002.safetensors rename to DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/model-00001-of-00002.safetensors diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/model-00002-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/model-00002-of-00002.safetensors similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/model-00002-of-00002.safetensors rename to DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/model-00002-of-00002.safetensors diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/model.safetensors.index.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/model.safetensors.index.json rename to DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/model.safetensors.index.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3bit/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/special_tokens_map.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3bit/special_tokens_map.json rename to DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/special_tokens_map.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3bit/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/tokenizer.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3bit/tokenizer.json rename to DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/tokenizer.json diff --git a/DeepSeek-R1-Distill-Qwen-14B-3bit/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/tokenizer_config.json similarity index 100% rename from DeepSeek-R1-Distill-Qwen-14B-3bit/tokenizer_config.json rename to 
DeepSeek-R1-Distill-Qwen-14B_4,6_mixed/tokenizer_config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/config.json b/DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/config.json
rename to DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/model-00001-of-00002.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/model-00001-of-00002.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/model-00001-of-00002.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/model-00002-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/model-00002-of-00002.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/model-00002-of-00002.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/model-00002-of-00002.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/model.safetensors.index.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/model.safetensors.index.json
rename to DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/model.safetensors.index.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/special_tokens_map.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/special_tokens_map.json
rename to DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/special_tokens_map.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/tokenizer.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/tokenizer.json
rename to DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/tokenizer.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/tokenizer_config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,6_mixed/tokenizer_config.json
rename to DeepSeek-R1-Distill-Qwen-14B_4,8_mixed/tokenizer_config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B_4bit/model-00001-of-00002.safetensors b/DeepSeek-R1-Distill-Qwen-14B_4bit/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/config.json b/DeepSeek-R1-Distill-Qwen-14B_6bit/config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/config.json
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/model-00001-of-00003.safetensors b/DeepSeek-R1-Distill-Qwen-14B_6bit/model-00001-of-00003.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/model-00001-of-00003.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/model-00001-of-00003.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/model-00002-of-00003.safetensors b/DeepSeek-R1-Distill-Qwen-14B_6bit/model-00002-of-00003.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/model-00002-of-00003.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/model-00002-of-00003.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/model-00003-of-00003.safetensors b/DeepSeek-R1-Distill-Qwen-14B_6bit/model-00003-of-00003.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/model-00003-of-00003.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/model-00003-of-00003.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_6bit/model.safetensors.index.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/model.safetensors.index.json
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/model.safetensors.index.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_6bit/special_tokens_map.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/special_tokens_map.json
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/special_tokens_map.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_6bit/tokenizer.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/tokenizer.json
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/tokenizer.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_6bit/tokenizer_config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4,8_mixed/tokenizer_config.json
rename to DeepSeek-R1-Distill-Qwen-14B_6bit/tokenizer_config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/config.json b/DeepSeek-R1-Distill-Qwen-14B_8bit/config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/config.json
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/model-00001-of-00003.safetensors b/DeepSeek-R1-Distill-Qwen-14B_8bit/model-00001-of-00003.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/model-00001-of-00003.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/model-00001-of-00003.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/model-00002-of-00003.safetensors b/DeepSeek-R1-Distill-Qwen-14B_8bit/model-00002-of-00003.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/model-00002-of-00003.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/model-00002-of-00003.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/model-00003-of-00003.safetensors b/DeepSeek-R1-Distill-Qwen-14B_8bit/model-00003-of-00003.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/model-00003-of-00003.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/model-00003-of-00003.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_8bit/model.safetensors.index.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/model.safetensors.index.json
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/model.safetensors.index.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4bit/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_8bit/special_tokens_map.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4bit/special_tokens_map.json
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/special_tokens_map.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4bit/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_8bit/tokenizer.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4bit/tokenizer.json
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/tokenizer.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-4bit/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_8bit/tokenizer_config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-4bit/tokenizer_config.json
rename to DeepSeek-R1-Distill-Qwen-14B_8bit/tokenizer_config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/config.json b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/config.json
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00001-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00001-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00001-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00001-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00002-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00002-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00002-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00002-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00003-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00003-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00003-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00003-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00004-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00004-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00004-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00004-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00005-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00005-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00005-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00005-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00006-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00006-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/model-00006-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/model-00006-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-bfloat16/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/model.safetensors.index.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-bfloat16/model.safetensors.index.json
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/model.safetensors.index.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/special_tokens_map.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/special_tokens_map.json
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/special_tokens_map.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/tokenizer.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/tokenizer.json
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/tokenizer.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-6bit/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_bfloat16/tokenizer_config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-6bit/tokenizer_config.json
rename to DeepSeek-R1-Distill-Qwen-14B_bfloat16/tokenizer_config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/config.json b/DeepSeek-R1-Distill-Qwen-14B_float16/config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/config.json
rename to DeepSeek-R1-Distill-Qwen-14B_float16/config.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/model-00001-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_float16/model-00001-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/model-00001-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_float16/model-00001-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/model-00002-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_float16/model-00002-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/model-00002-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_float16/model-00002-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/model-00003-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_float16/model-00003-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/model-00003-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_float16/model-00003-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/model-00004-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_float16/model-00004-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/model-00004-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_float16/model-00004-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/model-00005-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_float16/model-00005-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/model-00005-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_float16/model-00005-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/model-00006-of-00006.safetensors b/DeepSeek-R1-Distill-Qwen-14B_float16/model-00006-of-00006.safetensors
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/model-00006-of-00006.safetensors
rename to DeepSeek-R1-Distill-Qwen-14B_float16/model-00006-of-00006.safetensors
diff --git a/DeepSeek-R1-Distill-Qwen-14B-float16/model.safetensors.index.json b/DeepSeek-R1-Distill-Qwen-14B_float16/model.safetensors.index.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-float16/model.safetensors.index.json
rename to DeepSeek-R1-Distill-Qwen-14B_float16/model.safetensors.index.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/special_tokens_map.json b/DeepSeek-R1-Distill-Qwen-14B_float16/special_tokens_map.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/special_tokens_map.json
rename to DeepSeek-R1-Distill-Qwen-14B_float16/special_tokens_map.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/tokenizer.json b/DeepSeek-R1-Distill-Qwen-14B_float16/tokenizer.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/tokenizer.json
rename to DeepSeek-R1-Distill-Qwen-14B_float16/tokenizer.json
diff --git a/DeepSeek-R1-Distill-Qwen-14B-8bit/tokenizer_config.json b/DeepSeek-R1-Distill-Qwen-14B_float16/tokenizer_config.json
similarity index 100%
rename from DeepSeek-R1-Distill-Qwen-14B-8bit/tokenizer_config.json
rename to DeepSeek-R1-Distill-Qwen-14B_float16/tokenizer_config.json
diff --git a/README.md b/README.md
index 687be845a02814b3802ed25aeb5a84afadc7b5a7..0c0ea5714611eb6598cfd87755eaac3e871db57d 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 ---
 quantized_by: sealad886
-license_link: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B/blob/main/LICENSE
+license_link: >-
+  https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B/blob/main/LICENSE
 language:
 - en
 pipeline_tag: text-generation
@@ -8,7 +9,7 @@ base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
 tags:
 - chat
 - mlx
-- conversations
+license: mit
 ---
 
 # mlx-community/DeepSeek-R1-Distill-Qwen-14B