Add files using upload-large-folder tool

Files changed:
- chat_template.jinja +47 -0
- config.json +53 -53
- generation_config.json +1 -1
- model-00001-of-00003.safetensors +2 -2
- model-00002-of-00003.safetensors +2 -2
- model-00003-of-00003.safetensors +2 -2
- model.safetensors.index.json +197 -192
- tokenizer_config.json +0 -1
chat_template.jinja
ADDED
@@ -0,0 +1,47 @@
1  + {{ bos_token }}
2  + {%- if messages[0]['role'] == 'system' -%}
3  + {%- if messages[0]['content'] is string -%}
4  + {%- set first_user_prefix = messages[0]['content'] + '
5  +
6  + ' -%}
7  + {%- else -%}
8  + {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
9  +
10 + ' -%}
11 + {%- endif -%}
12 + {%- set loop_messages = messages[1:] -%}
13 + {%- else -%}
14 + {%- set first_user_prefix = "" -%}
15 + {%- set loop_messages = messages -%}
16 + {%- endif -%}
17 + {%- for message in loop_messages -%}
18 + {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
19 + {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
20 + {%- endif -%}
21 + {%- if (message['role'] == 'assistant') -%}
22 + {%- set role = "model" -%}
23 + {%- else -%}
24 + {%- set role = message['role'] -%}
25 + {%- endif -%}
26 + {{ '<start_of_turn>' + role + '
27 + ' + (first_user_prefix if loop.first else "") }}
28 + {%- if message['content'] is string -%}
29 + {{ message['content'] | trim }}
30 + {%- elif message['content'] is iterable -%}
31 + {%- for item in message['content'] -%}
32 + {%- if item['type'] == 'image' -%}
33 + {{ '<start_of_image>' }}
34 + {%- elif item['type'] == 'text' -%}
35 + {{ item['text'] | trim }}
36 + {%- endif -%}
37 + {%- endfor -%}
38 + {%- else -%}
39 + {{ raise_exception("Invalid content type") }}
40 + {%- endif -%}
41 + {{ '<end_of_turn>
42 + ' }}
43 + {%- endfor -%}
44 + {%- if add_generation_prompt -%}
45 + {{'<start_of_turn>model
46 + '}}
47 + {%- endif -%}
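The added template is what transformers renders for chat inputs: a leading system turn is folded into the first user turn, the assistant role is mapped to "model", each turn is wrapped in <start_of_turn>/<end_of_turn>, and image content items become <start_of_image> placeholders. A minimal sketch of exercising it; the repo id is a placeholder assumption, not taken from this commit:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-12b-it-bnb-4bit")  # placeholder repo id

messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Summarize Git LFS in one sentence."},
]

# Renders the Jinja template above; add_generation_prompt appends the trailing
# "<start_of_turn>model\n" so the model continues as the "model" role.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)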
config.json
CHANGED
@@ -25,81 +25,81 @@
25    "multi_modal_projector",
26    "merger",
27    "modality_projection",
28  - "language_model.model.layers.
29  - "language_model.model.layers.2.self_attn",
30    "language_model.model.layers.13.mlp",
31  - "language_model.model.layers.2.mlp",
32  - "language_model.model.layers.11.mlp",
33    "language_model.model.layers.3.self_attn",
34  - "language_model.model.layers.12.mlp",
35  - "language_model.model.layers.6.self_attn",
36  - "language_model.model.layers.0.self_attn",
37  - "language_model.model.layers.10.mlp",
38  - "language_model.model.layers.5.self_attn",
39  - "language_model.model.layers.9.mlp",
40  - "language_model.model.layers.1.mlp",
41  - "language_model.model.layers.0.mlp",
42    "language_model.model.layers.4.mlp",
43  - "language_model.model.layers.5.mlp",
44  - "language_model.model.layers.8.mlp",
45    "language_model.model.layers.7.self_attn",
46  - "language_model.model.layers.
47  - "language_model.model.layers.
48    "language_model.model.layers.6.mlp",
49    "vision_tower.vision_model.encoder.layers.26.self_attn",
50  - "
51  - "
52    "vision_tower.vision_model.encoder.layers.20.self_attn",
53  - "vision_tower.vision_model.encoder.layers.
54  - "
55    "vision_tower.vision_model.encoder.layers.22.self_attn",
56  - "vision_tower.vision_model.encoder.layers.15.mlp",
57    "vision_tower.vision_model.encoder.layers.24.mlp",
58    "vision_tower.vision_model.encoder.layers.17.self_attn",
59  - "vision_tower.vision_model.encoder.layers.
60    "vision_tower.vision_model.encoder.layers.22.mlp",
61    "vision_tower.vision_model.encoder.layers.17.mlp",
62  - "vision_tower.vision_model.encoder.layers.
63  - "vision_tower.vision_model.encoder.layers.23.mlp",
64  - "vision_tower.vision_model.encoder.layers.16.self_attn",
65  - "vision_tower.vision_model.encoder.layers.24.self_attn",
66  - "vision_tower.vision_model.encoder.layers.23.self_attn",
67    "vision_tower.vision_model.encoder.layers.19.mlp",
68    "vision_tower.vision_model.encoder.layers.14.self_attn",
69  - "vision_tower.vision_model.encoder.layers.
70  - "vision_tower.vision_model.encoder.layers.15.self_attn",
71  - "vision_tower.vision_model.encoder.layers.21.self_attn",
72  - "vision_tower.vision_model.encoder.layers.21.mlp",
73  - "vision_tower.vision_model.encoder.layers.10.mlp",
74  - "vision_tower.vision_model.encoder.layers.13.self_attn",
75    "vision_tower.vision_model.encoder.layers.14.mlp",
76  - "vision_tower.vision_model.encoder.layers.13.
77  - "vision_tower.vision_model.encoder.layers.
78  - "vision_tower.vision_model.encoder.layers.11.mlp",
79  - "vision_tower.vision_model.encoder.layers.9.mlp",
80  - "vision_tower.vision_model.encoder.layers.8.mlp",
81    "vision_tower.vision_model.encoder.layers.10.self_attn",
82  - "vision_tower.vision_model.encoder.layers.
83  - "vision_tower.vision_model.encoder.layers.
84    "vision_tower.vision_model.encoder.layers.8.self_attn",
85    "vision_tower.vision_model.encoder.layers.7.mlp",
86  - "vision_tower.vision_model.encoder.layers.11.self_attn",
87  - "vision_tower.vision_model.encoder.layers.5.mlp",
88  - "vision_tower.vision_model.encoder.layers.4.mlp",
89  - "vision_tower.vision_model.encoder.layers.4.self_attn",
90  - "vision_tower.vision_model.encoder.layers.7.self_attn",
91  - "vision_tower.vision_model.encoder.layers.9.self_attn",
92  - "vision_tower.vision_model.encoder.layers.6.self_attn",
93  - "vision_tower.vision_model.encoder.layers.3.self_attn",
94    "vision_tower.vision_model.encoder.layers.5.self_attn",
95    "vision_tower.vision_model.encoder.layers.1.self_attn",
96  - "vision_tower.vision_model.encoder.layers.
97    "vision_tower.vision_model.encoder.layers.3.mlp",
98  - "vision_tower.vision_model.encoder.layers.
99    "vision_tower.vision_model.encoder.layers.2.mlp",
100 - "vision_tower.vision_model.encoder.layers.
101   "vision_tower.vision_model.encoder.layers.2.self_attn",
102 - "vision_tower.vision_model.encoder.layers.26.mlp"
103   ],
104   "llm_int8_threshold": 6.0,
105   "load_in_4bit": true,
@@ -137,7 +137,7 @@
137   "vocab_size": 262208
138   },
139   "torch_dtype": "bfloat16",
140 - "transformers_version": "4.
141   "unsloth_fixed": true,
142   "vision_config": {
143   "attention_dropout": 0.0,

25    "multi_modal_projector",
26    "merger",
27    "modality_projection",
28  + "language_model.model.layers.1.mlp",
29    "language_model.model.layers.13.mlp",
30    "language_model.model.layers.3.self_attn",
31    "language_model.model.layers.4.mlp",
32    "language_model.model.layers.7.self_attn",
33  + "language_model.model.layers.0.mlp",
34  + "language_model.model.layers.17.mlp",
35  + "language_model.model.layers.18.self_attn",
36  + "language_model.model.layers.5.mlp",
37  + "language_model.model.layers.16.self_attn",
38  + "language_model.model.layers.11.mlp",
39  + "language_model.model.layers.19.self_attn",
40    "language_model.model.layers.6.mlp",
41  + "vision_tower.vision_model.encoder.layers.23.self_attn",
42  + "language_model.model.layers.10.self_attn",
43  + "language_model.model.layers.18.mlp",
44  + "language_model.model.layers.8.mlp",
45  + "language_model.model.layers.12.mlp",
46    "vision_tower.vision_model.encoder.layers.26.self_attn",
47  + "language_model.model.layers.14.mlp",
48  + "language_model.model.layers.7.mlp",
49  + "language_model.model.layers.2.mlp",
50  + "vision_tower.vision_model.encoder.layers.23.mlp",
51  + "vision_tower.vision_model.encoder.layers.24.self_attn",
52    "vision_tower.vision_model.encoder.layers.20.self_attn",
53  + "vision_tower.vision_model.encoder.layers.21.self_attn",
54  + "language_model.model.layers.3.mlp",
55    "vision_tower.vision_model.encoder.layers.22.self_attn",
56    "vision_tower.vision_model.encoder.layers.24.mlp",
57  + "vision_tower.vision_model.encoder.layers.21.mlp",
58  + "vision_tower.vision_model.encoder.layers.15.self_attn",
59  + "vision_tower.vision_model.encoder.layers.16.self_attn",
60  + "vision_tower.vision_model.encoder.layers.25.self_attn",
61  + "vision_tower.vision_model.encoder.layers.18.mlp",
62  + "vision_tower.vision_model.encoder.layers.15.mlp",
63  + "vision_tower.vision_model.encoder.layers.18.self_attn",
64    "vision_tower.vision_model.encoder.layers.17.self_attn",
65  + "vision_tower.vision_model.encoder.layers.19.self_attn",
66    "vision_tower.vision_model.encoder.layers.22.mlp",
67  + "vision_tower.vision_model.encoder.layers.25.mlp",
68    "vision_tower.vision_model.encoder.layers.17.mlp",
69  + "vision_tower.vision_model.encoder.layers.20.mlp",
70    "vision_tower.vision_model.encoder.layers.19.mlp",
71  + "vision_tower.vision_model.encoder.layers.13.mlp",
72    "vision_tower.vision_model.encoder.layers.14.self_attn",
73  + "vision_tower.vision_model.encoder.layers.16.mlp",
74    "vision_tower.vision_model.encoder.layers.14.mlp",
75  + "vision_tower.vision_model.encoder.layers.13.self_attn",
76  + "vision_tower.vision_model.encoder.layers.5.mlp",
77    "vision_tower.vision_model.encoder.layers.10.self_attn",
78  + "vision_tower.vision_model.encoder.layers.8.mlp",
79  + "vision_tower.vision_model.encoder.layers.9.mlp",
80  + "vision_tower.vision_model.encoder.layers.11.mlp",
81  + "vision_tower.vision_model.encoder.layers.6.self_attn",
82  + "vision_tower.vision_model.encoder.layers.10.mlp",
83  + "vision_tower.vision_model.encoder.layers.9.self_attn",
84  + "vision_tower.vision_model.encoder.layers.11.self_attn",
85    "vision_tower.vision_model.encoder.layers.8.self_attn",
86  + "vision_tower.vision_model.encoder.layers.12.mlp",
87    "vision_tower.vision_model.encoder.layers.7.mlp",
88    "vision_tower.vision_model.encoder.layers.5.self_attn",
89  + "vision_tower.vision_model.encoder.layers.12.self_attn",
90  + "vision_tower.vision_model.encoder.layers.3.self_attn",
91    "vision_tower.vision_model.encoder.layers.1.self_attn",
92  + "vision_tower.vision_model.encoder.layers.6.mlp",
93  + "vision_tower.vision_model.encoder.layers.7.self_attn",
94  + "vision_tower.vision_model.encoder.layers.4.self_attn",
95    "vision_tower.vision_model.encoder.layers.3.mlp",
96  + "vision_tower.vision_model.encoder.layers.4.mlp",
97    "vision_tower.vision_model.encoder.layers.2.mlp",
98  + "vision_tower.vision_model.encoder.layers.1.mlp",
99    "vision_tower.vision_model.encoder.layers.2.self_attn",
100 + "vision_tower.vision_model.encoder.layers.26.mlp",
101 + "vision_tower.vision_model.encoder.layers.0.self_attn",
102 + "vision_tower.vision_model.encoder.layers.0.mlp"
103   ],
104   "llm_int8_threshold": 6.0,
105   "load_in_4bit": true,

137   "vocab_size": 262208
138   },
139   "torch_dtype": "bfloat16",
140 + "transformers_version": "4.52.0.dev0",
141   "unsloth_fixed": true,
142   "vision_config": {
143   "attention_dropout": 0.0,
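The block rewritten above is the bitsandbytes quantization config: llm_int8_skip_modules names the modules kept in 16-bit precision while the rest of the checkpoint is loaded as 4-bit NF4 (load_in_4bit: true). A minimal loading-side sketch of an equivalent configuration; the repo id, compute dtype, and the abbreviated skip list are illustrative assumptions, not values copied from this commit:

import torch
from transformers import AutoModelForImageTextToText, BitsAndBytesConfig

# 4-bit NF4 with double quantization (matching the nested_absmax /
# nested_quant_map tensors in the index below), plus a skip list of
# modules left unquantized. The list here is abbreviated for illustration.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_skip_modules=[
        "multi_modal_projector",
        "language_model.model.layers.1.mlp",
        "vision_tower.vision_model.encoder.layers.0.mlp",
    ],
)

model = AutoModelForImageTextToText.from_pretrained(
    "unsloth/gemma-3-12b-it-bnb-4bit",  # placeholder repo id
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
)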
generation_config.json
CHANGED
@@ -9,5 +9,5 @@
9     "pad_token_id": 0,
10    "top_k": 64,
11    "top_p": 0.95,
12  - "transformers_version": "4.
13    }

9     "pad_token_id": 0,
10    "top_k": 64,
11    "top_p": 0.95,
12  + "transformers_version": "4.52.0.dev0"
13    }
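Only the transformers_version stamp changes in this file; the decoding defaults (top_k 64, top_p 0.95, pad_token_id 0) carry over. A small sketch of reproducing those defaults explicitly at generation time; do_sample=True and the model/inputs variables are assumptions for illustration:

from transformers import GenerationConfig

gen_config = GenerationConfig(
    do_sample=True,   # assumed; sampling must be on for top_k / top_p to take effect
    top_k=64,
    top_p=0.95,
    pad_token_id=0,
)
# outputs = model.generate(**inputs, generation_config=gen_config)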
model-00001-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
1   version https://git-lfs.github.com/spec/v1
2 - oid sha256:
3 - size

1   version https://git-lfs.github.com/spec/v1
2 + oid sha256:65c40514ec3214bd0c5fa5466cec219db978c16360382448a96bd6bf40b95dd6
3 + size 4960125164
model-00002-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
1   version https://git-lfs.github.com/spec/v1
2 - oid sha256:
3 - size

1   version https://git-lfs.github.com/spec/v1
2 + oid sha256:0396b9cc8d889d70cd1f15abc8827e22a14a1bfa3b156cd23339d4c106b8468e
3 + size 4970712399
model-00003-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
1   version https://git-lfs.github.com/spec/v1
2 - oid sha256:
3 - size

1   version https://git-lfs.github.com/spec/v1
2 + oid sha256:d3cd951c9b3acbfb4d5c19e2e6ff7bfd3cd285ad0e75832618ed3e98b6645ad6
3 + size 2836985438
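Each of the three shard entries above is a Git LFS pointer: the repository stores only the blob's sha256 and byte size. A small self-contained check of a downloaded shard against its pointer; the local path is an assumption, the hash and size are the ones recorded for model-00001-of-00003.safetensors:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-GB shards never need to fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

shard = Path("model-00001-of-00003.safetensors")   # assumed local path
assert shard.stat().st_size == 4960125164          # size from the LFS pointer
assert sha256_of(shard) == "65c40514ec3214bd0c5fa5466cec219db978c16360382448a96bd6bf40b95dd6"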
model.safetensors.index.json
CHANGED
@@ -1,6 +1,6 @@
1   {
2     "metadata": {
3 -     "total_size":
4     },
5     "weight_map": {
6       "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
@@ -13,10 +13,30 @@
13    "language_model.model.layers.0.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
14    "language_model.model.layers.0.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
15    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
16    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
17    "language_model.model.layers.0.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
18    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
19    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
20    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
21    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
22    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
@@ -52,37 +72,32 @@
52    "language_model.model.layers.1.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
53    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
54    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
55    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
56    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
57    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
58    "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
59    "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
60    "language_model.model.layers.10.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
61    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
62-66 - "language_model.model.layers.10.self_attn.k_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
67    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
68-72 - "language_model.model.layers.10.self_attn.o_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
73    "language_model.model.layers.10.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
74    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
75-79 - "language_model.model.layers.10.self_attn.q_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
80    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
81-85 - "language_model.model.layers.10.self_attn.v_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
86    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
87    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
88    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -184,23 +199,8 @@
184   "language_model.model.layers.13.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
185   "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
186   "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
187-191 - "language_model.model.layers.14.mlp.down_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
192   "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
193-197 - "language_model.model.layers.14.mlp.gate_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
198   "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
199-203 - "language_model.model.layers.14.mlp.up_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
204   "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
205   "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
206   "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -302,49 +302,14 @@
302   "language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
303   "language_model.model.layers.16.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
304   "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
305-309 - "language_model.model.layers.16.self_attn.k_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
310   "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
311-315 - "language_model.model.layers.16.self_attn.o_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
316   "language_model.model.layers.16.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
317   "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
318-322 - "language_model.model.layers.16.self_attn.q_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
323   "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
324-328 - "language_model.model.layers.16.self_attn.v_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
329   "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
330   "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
331-335 - "language_model.model.layers.17.mlp.down_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
336   "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
337-341 - "language_model.model.layers.17.mlp.gate_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
342   "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
343-347 - "language_model.model.layers.17.mlp.up_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
348   "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
349   "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
350   "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -376,52 +341,17 @@
376   "language_model.model.layers.17.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
377   "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
378   "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
379-383 - "language_model.model.layers.18.mlp.down_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
384   "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
385-389 - "language_model.model.layers.18.mlp.gate_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
390   "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
391-395 - "language_model.model.layers.18.mlp.up_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
396   "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
397   "language_model.model.layers.18.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
398   "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
399   "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
400   "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
401-405 - "language_model.model.layers.18.self_attn.k_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
406   "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
407-411 - "language_model.model.layers.18.self_attn.o_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
412   "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
413   "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
414-418 - "language_model.model.layers.18.self_attn.q_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
419   "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
420-424 - "language_model.model.layers.18.self_attn.v_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
425   "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
426   "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
427   "language_model.model.layers.19.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -446,30 +376,10 @@
446   "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
447   "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
448   "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
449-453 - "language_model.model.layers.19.self_attn.k_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
454   "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
455-459 - "language_model.model.layers.19.self_attn.o_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
460   "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
461   "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
462-466 - "language_model.model.layers.19.self_attn.q_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
467   "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
468-472 - "language_model.model.layers.19.self_attn.v_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
473   "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
474   "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
475   "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
@@ -479,10 +389,30 @@
479   "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
480   "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
481   "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
482   "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
483   "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
484   "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
485   "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
486   "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
487   "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
488   "language_model.model.layers.20.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -627,28 +557,28 @@
627   "language_model.model.layers.22.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
628   "language_model.model.layers.22.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
629   "language_model.model.layers.22.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
630 - "language_model.model.layers.23.input_layernorm.weight": "model-
631 - "language_model.model.layers.23.mlp.down_proj.weight": "model-
632-636 - "language_model.model.layers.23.mlp.down_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
637   "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
638-642   "language_model.model.layers.23.mlp.gate_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-00002-of-00003.safetensors",
643 - "language_model.model.layers.23.mlp.up_proj.weight": "model-
644-648 - "language_model.model.layers.23.mlp.up_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
649 - "language_model.model.layers.23.post_attention_layernorm.weight": "model-
650 - "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-
651 - "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-
652   "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
653   "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
654   "language_model.model.layers.23.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -682,47 +612,47 @@
682   "language_model.model.layers.24.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
683   "language_model.model.layers.24.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
684   "language_model.model.layers.24.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
685 - "language_model.model.layers.24.mlp.gate_proj.weight": "model-
686-690 - "language_model.model.layers.24.mlp.gate_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
691 - "language_model.model.layers.24.mlp.up_proj.weight": "model-
692-696 - "language_model.model.layers.24.mlp.up_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
697   "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
698   "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
699   "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
700 - "language_model.model.layers.24.self_attn.k_norm.weight": "model-
701 - "language_model.model.layers.24.self_attn.k_proj.weight": "model-
702-706 - "language_model.model.layers.24.self_attn.k_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
707 - "language_model.model.layers.24.self_attn.o_proj.weight": "model-
708-712 - "language_model.model.layers.24.self_attn.o_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
713 - "language_model.model.layers.24.self_attn.q_norm.weight": "model-
714 - "language_model.model.layers.24.self_attn.q_proj.weight": "model-
715-719 - "language_model.model.layers.24.self_attn.q_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
720 - "language_model.model.layers.24.self_attn.v_proj.weight": "model-
721-725 - "language_model.model.layers.24.self_attn.v_proj.weight.{absmax,nested_absmax,nested_quant_map,quant_map,quant_state.bitsandbytes__nf4}": "model-
726   "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
727   "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
728   "language_model.model.layers.25.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
@@ -1456,19 +1386,39 @@
1456   "language_model.model.layers.39.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
1457   "language_model.model.layers.39.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
1458   "language_model.model.layers.39.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
1459 - "language_model.model.layers.4.input_layernorm.weight": "model-
1460 - "language_model.model.layers.4.mlp.down_proj.weight": "model-
1461   "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
1462   "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
1463 - "language_model.model.layers.4.post_attention_layernorm.weight": "model-
1464 - "language_model.model.layers.4.post_feedforward_layernorm.weight": "model-
1465 - "language_model.model.layers.4.pre_feedforward_layernorm.weight": "model-
1466   "language_model.model.layers.4.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
1467   "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
1468   "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
1469   "language_model.model.layers.4.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
1470   "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
1471   "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
1472   "language_model.model.layers.40.input_layernorm.weight": "model-00003-of-00003.safetensors",
1473   "language_model.model.layers.40.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
1474   "language_model.model.layers.40.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
@@ -1855,17 +1805,37 @@
1855   "language_model.model.layers.47.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
1856   "language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00003.safetensors",
1857   "language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1858 - "language_model.model.layers.5.mlp.gate_proj.weight": "model-
1859   "language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
1860   "language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
1861   "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1862   "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1863 - "language_model.model.layers.5.self_attn.k_norm.weight": "model-
1864 - "language_model.model.layers.5.self_attn.k_proj.weight": "model-
1865-1868 - "language_model.model.layers.5.self_attn.
1869   "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
1870   "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1871   "language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -1875,10 +1845,30 @@
1875   "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1876   "language_model.model.layers.6.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
1877   "language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
1878   "language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
1879   "language_model.model.layers.6.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
1880   "language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
1881   "language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
1882   "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
1883   "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1884   "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -1927,8 +1917,23 @@
1927   "language_model.model.layers.8.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1928   "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
1929   "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1930   "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
1931   "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
1932   "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
1933   "language_model.model.layers.9.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1934   "language_model.model.layers.9.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1 |
{
|
2 |
"metadata": {
|
3 |
+
"total_size": 12767500929
|
4 |
},
|
5 |
"weight_map": {
|
6 |
"language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
|
|
|
13 |
"language_model.model.layers.0.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
|
14 |
"language_model.model.layers.0.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
|
15 |
"language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
16 |
+
"language_model.model.layers.0.self_attn.k_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
17 |
+
"language_model.model.layers.0.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
18 |
+
"language_model.model.layers.0.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
19 |
+
"language_model.model.layers.0.self_attn.k_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
20 |
+
"language_model.model.layers.0.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
21 |
"language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
22 |
+
"language_model.model.layers.0.self_attn.o_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
23 |
+
"language_model.model.layers.0.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
24 |
+
"language_model.model.layers.0.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
25 |
+
"language_model.model.layers.0.self_attn.o_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
26 |
+
"language_model.model.layers.0.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
27 |
"language_model.model.layers.0.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
|
28 |
"language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
29 |
+
"language_model.model.layers.0.self_attn.q_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
30 |
+
"language_model.model.layers.0.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
31 |
+
"language_model.model.layers.0.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
32 |
+
"language_model.model.layers.0.self_attn.q_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
33 |
+
"language_model.model.layers.0.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
34 |
"language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
35 |
+
"language_model.model.layers.0.self_attn.v_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
36 |
+
"language_model.model.layers.0.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
37 |
+
"language_model.model.layers.0.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
38 |
+
"language_model.model.layers.0.self_attn.v_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
39 |
+
"language_model.model.layers.0.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
40 |
"language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
41 |
"language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
42 |
"language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
|
|
72 |
"language_model.model.layers.1.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
73 |
"language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
74 |
"language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
75 |
+
"language_model.model.layers.10.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
76 |
+
"language_model.model.layers.10.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
77 |
+
"language_model.model.layers.10.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
78 |
+
"language_model.model.layers.10.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
79 |
+
"language_model.model.layers.10.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
80 |
"language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
81 |
+
"language_model.model.layers.10.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
82 |
+
"language_model.model.layers.10.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
83 |
+
"language_model.model.layers.10.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
84 |
+
"language_model.model.layers.10.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
85 |
+
"language_model.model.layers.10.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
86 |
"language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
87 |
+
"language_model.model.layers.10.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
88 |
+
"language_model.model.layers.10.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
89 |
+
"language_model.model.layers.10.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
90 |
+
"language_model.model.layers.10.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
91 |
+
"language_model.model.layers.10.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
92 |
"language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
93 |
"language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
94 |
"language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
95 |
"language_model.model.layers.10.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
|
96 |
"language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
|
|
|
|
|
|
|
|
|
|
97 |
"language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
|
|
|
|
|
|
|
|
|
|
98 |
"language_model.model.layers.10.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
|
99 |
"language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
|
|
|
|
|
|
|
|
|
|
100 |
"language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
|
|
|
|
|
|
|
|
|
|
101 |
"language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
102 |
"language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
103 |
"language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
199 |
"language_model.model.layers.13.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
200 |
"language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
201 |
"language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
202 |
"language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
203 |
"language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
204 |
"language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
205 |
"language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
206 |
"language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
302 |
"language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
303 |
"language_model.model.layers.16.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
|
304 |
"language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
305 |
"language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
306 |
"language_model.model.layers.16.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
|
307 |
"language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
308 |
"language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
309 |
"language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
310 |
"language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
311 |
"language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
312 |
"language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
313 |
"language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
314 |
"language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
315 |
"language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
341 |
"language_model.model.layers.17.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
342 |
"language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
343 |
"language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
344 |
"language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
345 |
"language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
346 |
"language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
347 |
"language_model.model.layers.18.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
348 |
"language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
349 |
"language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
|
350 |
"language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
351 |
"language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
352 |
"language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
|
353 |
"language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
354 |
"language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
355 |
"language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
356 |
"language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
357 |
"language_model.model.layers.19.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
376 |
"language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
377 |
"language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
|
378 |
"language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
379 |
"language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
380 |
"language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
|
381 |
"language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
382 |
"language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
383 |
"language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
384 |
"language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
385 |
"language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
389 |
"language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
|
390 |
"language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
|
391 |
"language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
392 |
+
"language_model.model.layers.2.self_attn.k_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
393 |
+
"language_model.model.layers.2.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
394 |
+
"language_model.model.layers.2.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
395 |
+
"language_model.model.layers.2.self_attn.k_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
396 |
+
"language_model.model.layers.2.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
397 |
"language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
398 |
+
"language_model.model.layers.2.self_attn.o_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
399 |
+
"language_model.model.layers.2.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
400 |
+
"language_model.model.layers.2.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
401 |
+
"language_model.model.layers.2.self_attn.o_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
402 |
+
"language_model.model.layers.2.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
403 |
"language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
|
404 |
"language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
405 |
+
"language_model.model.layers.2.self_attn.q_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
406 |
+
"language_model.model.layers.2.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
407 |
+
"language_model.model.layers.2.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
408 |
+
"language_model.model.layers.2.self_attn.q_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
409 |
+
"language_model.model.layers.2.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
410 |
"language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
411 |
+
"language_model.model.layers.2.self_attn.v_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
412 |
+
"language_model.model.layers.2.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
413 |
+
"language_model.model.layers.2.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
414 |
+
"language_model.model.layers.2.self_attn.v_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
415 |
+
"language_model.model.layers.2.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
416 |
"language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
417 |
"language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
418 |
"language_model.model.layers.20.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
557 |
"language_model.model.layers.22.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
558 |
"language_model.model.layers.22.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
559 |
"language_model.model.layers.22.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
560 |
+
"language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
561 |
+
"language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
562 |
+
"language_model.model.layers.23.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
563 |
+
"language_model.model.layers.23.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
564 |
+
"language_model.model.layers.23.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
565 |
+
"language_model.model.layers.23.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
566 |
+
"language_model.model.layers.23.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
567 |
"language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
568 |
"language_model.model.layers.23.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
569 |
"language_model.model.layers.23.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
570 |
"language_model.model.layers.23.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
571 |
"language_model.model.layers.23.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
572 |
"language_model.model.layers.23.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
573 |
+
"language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
574 |
+
"language_model.model.layers.23.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
575 |
+
"language_model.model.layers.23.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
576 |
+
"language_model.model.layers.23.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
577 |
+
"language_model.model.layers.23.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
578 |
+
"language_model.model.layers.23.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
579 |
+
"language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
580 |
+
"language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
|
581 |
+
"language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
|
582 |
"language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
|
583 |
"language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
584 |
"language_model.model.layers.23.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
612 |
"language_model.model.layers.24.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
613 |
"language_model.model.layers.24.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
614 |
"language_model.model.layers.24.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
615 |
+
"language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
616 |
+
"language_model.model.layers.24.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
617 |
+
"language_model.model.layers.24.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
618 |
+
"language_model.model.layers.24.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
619 |
+
"language_model.model.layers.24.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
620 |
+
"language_model.model.layers.24.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
621 |
+
"language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
622 |
+
"language_model.model.layers.24.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
623 |
+
"language_model.model.layers.24.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
624 |
+
"language_model.model.layers.24.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
625 |
+
"language_model.model.layers.24.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
626 |
+
"language_model.model.layers.24.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
627 |
"language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
628 |
"language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
|
629 |
"language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
|
630 |
+
"language_model.model.layers.24.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
|
631 |
+
"language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
632 |
+
"language_model.model.layers.24.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
633 |
+
"language_model.model.layers.24.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
634 |
+
"language_model.model.layers.24.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
635 |
+
"language_model.model.layers.24.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
636 |
+
"language_model.model.layers.24.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
637 |
+
"language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
638 |
+
"language_model.model.layers.24.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
639 |
+
"language_model.model.layers.24.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
640 |
+
"language_model.model.layers.24.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
641 |
+
"language_model.model.layers.24.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
642 |
+
"language_model.model.layers.24.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
643 |
+
"language_model.model.layers.24.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
|
644 |
+
"language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
645 |
+
"language_model.model.layers.24.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
646 |
+
"language_model.model.layers.24.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
647 |
+
"language_model.model.layers.24.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
648 |
+
"language_model.model.layers.24.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
649 |
+
"language_model.model.layers.24.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
650 |
+
"language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
651 |
+
"language_model.model.layers.24.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
652 |
+
"language_model.model.layers.24.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
|
653 |
+
"language_model.model.layers.24.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
654 |
+
"language_model.model.layers.24.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
655 |
+
"language_model.model.layers.24.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
656 |
"language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
657 |
"language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
658 |
"language_model.model.layers.25.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
1386 |
"language_model.model.layers.39.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
|
1387 |
"language_model.model.layers.39.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
|
1388 |
"language_model.model.layers.39.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
1389 |
+
"language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
1390 |
+
"language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
1391 |
"language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
1392 |
"language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
1393 |
+
"language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
1394 |
+
"language_model.model.layers.4.post_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
|
1395 |
+
"language_model.model.layers.4.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
|
1396 |
"language_model.model.layers.4.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
|
1397 |
"language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
1398 |
+
"language_model.model.layers.4.self_attn.k_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1399 |
+
"language_model.model.layers.4.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1400 |
+
"language_model.model.layers.4.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1401 |
+
"language_model.model.layers.4.self_attn.k_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1402 |
+
"language_model.model.layers.4.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1403 |
"language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
1404 |
+
"language_model.model.layers.4.self_attn.o_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1405 |
+
"language_model.model.layers.4.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1406 |
+
"language_model.model.layers.4.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1407 |
+
"language_model.model.layers.4.self_attn.o_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1408 |
+
"language_model.model.layers.4.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1409 |
"language_model.model.layers.4.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
|
1410 |
"language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
1411 |
+
"language_model.model.layers.4.self_attn.q_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1412 |
+
"language_model.model.layers.4.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1413 |
+
"language_model.model.layers.4.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1414 |
+
"language_model.model.layers.4.self_attn.q_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1415 |
+
"language_model.model.layers.4.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1416 |
"language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
1417 |
+
"language_model.model.layers.4.self_attn.v_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1418 |
+
"language_model.model.layers.4.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1419 |
+
"language_model.model.layers.4.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1420 |
+
"language_model.model.layers.4.self_attn.v_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1421 |
+
"language_model.model.layers.4.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1422 |
"language_model.model.layers.40.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
1423 |
"language_model.model.layers.40.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
1424 |
"language_model.model.layers.40.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
|
1805 |
"language_model.model.layers.47.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
|
1806 |
"language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
1807 |
"language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
1808 |
+
"language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
1809 |
"language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
1810 |
"language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
1811 |
"language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
1812 |
"language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
1813 |
+
"language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
|
1814 |
+
"language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
1815 |
+
"language_model.model.layers.5.self_attn.k_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1816 |
+
"language_model.model.layers.5.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1817 |
+
"language_model.model.layers.5.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1818 |
+
"language_model.model.layers.5.self_attn.k_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1819 |
+
"language_model.model.layers.5.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1820 |
+
"language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
1821 |
+
"language_model.model.layers.5.self_attn.o_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1822 |
+
"language_model.model.layers.5.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1823 |
+
"language_model.model.layers.5.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1824 |
+
"language_model.model.layers.5.self_attn.o_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1825 |
+
"language_model.model.layers.5.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1826 |
+
"language_model.model.layers.5.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
|
1827 |
+
"language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
1828 |
+
"language_model.model.layers.5.self_attn.q_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1829 |
+
"language_model.model.layers.5.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1830 |
+
"language_model.model.layers.5.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1831 |
+
"language_model.model.layers.5.self_attn.q_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1832 |
+
"language_model.model.layers.5.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1833 |
+
"language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
1834 |
+
"language_model.model.layers.5.self_attn.v_proj.weight.absmax": "model-00001-of-00003.safetensors",
|
1835 |
+
"language_model.model.layers.5.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
|
1836 |
+
"language_model.model.layers.5.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
|
1837 |
+
"language_model.model.layers.5.self_attn.v_proj.weight.quant_map": "model-00001-of-00003.safetensors",
|
1838 |
+
"language_model.model.layers.5.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
|
1839 |
"language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
1840 |
"language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
1841 |
"language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
1845 |
"language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
1846 |
"language_model.model.layers.6.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
|
1847 |
"language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
1848 |
+
"language_model.model.layers.6.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
1849 |
+
"language_model.model.layers.6.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
1850 |
+
"language_model.model.layers.6.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
1851 |
+
"language_model.model.layers.6.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
1852 |
+
"language_model.model.layers.6.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1853 |
"language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
1854 |
+
"language_model.model.layers.6.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
1855 |
+
"language_model.model.layers.6.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
1856 |
+
"language_model.model.layers.6.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
1857 |
+
"language_model.model.layers.6.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
1858 |
+
"language_model.model.layers.6.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1859 |
"language_model.model.layers.6.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
|
1860 |
"language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
1861 |
+
"language_model.model.layers.6.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
1862 |
+
"language_model.model.layers.6.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
1863 |
+
"language_model.model.layers.6.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
1864 |
+
"language_model.model.layers.6.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
1865 |
+
"language_model.model.layers.6.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1866 |
"language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
1867 |
+
"language_model.model.layers.6.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
1868 |
+
"language_model.model.layers.6.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
1869 |
+
"language_model.model.layers.6.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
1870 |
+
"language_model.model.layers.6.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
1871 |
+
"language_model.model.layers.6.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1872 |
"language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
1873 |
"language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
1874 |
"language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
1917 |
"language_model.model.layers.8.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1918 |
"language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
1919 |
"language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
1920 |
+
"language_model.model.layers.9.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
1921 |
+
"language_model.model.layers.9.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
1922 |
+
"language_model.model.layers.9.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
1923 |
+
"language_model.model.layers.9.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
1924 |
+
"language_model.model.layers.9.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1925 |
"language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
1926 |
+
"language_model.model.layers.9.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
1927 |
+
"language_model.model.layers.9.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
1928 |
+
"language_model.model.layers.9.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
1929 |
+
"language_model.model.layers.9.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
1930 |
+
"language_model.model.layers.9.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1931 |
"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
1932 |
+
"language_model.model.layers.9.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
|
1933 |
+
"language_model.model.layers.9.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
|
1934 |
+
"language_model.model.layers.9.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
|
1935 |
+
"language_model.model.layers.9.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
|
1936 |
+
"language_model.model.layers.9.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
|
1937 |
"language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
1938 |
"language_model.model.layers.9.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
1939 |
"language_model.model.layers.9.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
|
tokenizer_config.json
CHANGED
@@ -51325,7 +51325,6 @@
|
51325 |
},
|
51326 |
"boi_token": "<start_of_image>",
|
51327 |
"bos_token": "<bos>",
|
51328 |
-
"chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
|
51329 |
"clean_up_tokenization_spaces": false,
|
51330 |
"eoi_token": "<end_of_image>",
|
51331 |
"eos_token": "<end_of_turn>",
|
51325 |
},
|
51326 |
"boi_token": "<start_of_image>",
|
51327 |
"bos_token": "<bos>",
|
51328 |
"clean_up_tokenization_spaces": false,
|
51329 |
"eoi_token": "<end_of_image>",
|
51330 |
"eos_token": "<end_of_turn>",
|
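The removed "chat_template" entry above means the chat template is no longer embedded in tokenizer_config.json and now ships as the standalone chat_template.jinja file in this repository. A minimal sketch of checking that rendering is unchanged, assuming a recent transformers release that reads chat_template.jinja from the repo; the repo id below is a placeholder, not the actual model name:

    # placeholder repo id, swap in the real checkpoint path
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("your-org/your-gemma-nf4-checkpoint")

    messages = [{"role": "user", "content": "Hello!"}]

    # apply_chat_template should produce the same prompt as before the move,
    # e.g. starting with <bos><start_of_turn>user for this template.
    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    print(prompt)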