huihui-ai committed
Commit 74dc7a0 · verified · 1 Parent(s): 0817047

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,39 @@
- ---
- license: apache-2.0
- ---
+ ---
+ base_model:
+ - unsloth/gpt-oss-20b-BF16
+ license: apache-2.0
+ pipeline_tag: text-generation
+ library_name: transformers
+ tags:
+ - vllm
+ - unsloth
+ - abliterated
+ - uncensored
+ ---
+
+ # huihui-ai/Huihui-gpt-oss-20b-BF16-abliterated
+
+ This is an uncensored version of [unsloth/gpt-oss-20b-BF16](https://huggingface.co/unsloth/gpt-oss-20b-BF16) created with abliteration (see [remove-refusals-with-transformers](https://github.com/Sumandora/remove-refusals-with-transformers) to learn more about the technique).
+
+ ## Usage Warnings
+
+ - **Risk of Sensitive or Controversial Outputs**: This model’s safety filtering has been significantly reduced, so it may generate sensitive, controversial, or inappropriate content. Users should exercise caution and rigorously review generated outputs.
+
+ - **Not Suitable for All Audiences**: Due to limited content filtering, the model’s outputs may be inappropriate for public settings, underage users, or applications requiring high security.
+
+ - **Legal and Ethical Responsibilities**: Users must ensure their usage complies with local laws and ethical standards. Generated content may carry legal or ethical risks, and users are solely responsible for any consequences.
+
+ - **Research and Experimental Use**: This model is best used for research, testing, or controlled environments; avoid deploying it directly in production or public-facing commercial applications.
+
+ - **Monitoring and Review Recommendations**: Users are strongly advised to monitor model outputs in real time and conduct manual reviews when necessary to prevent the dissemination of inappropriate content.
+
+ - **No Default Safety Guarantees**: Unlike standard models, this model has not undergone rigorous safety optimization. huihui.ai bears no responsibility for any consequences arising from its use.
+
+ ### Donation
+ ##### Your donation helps us continue development and improvement; even the price of a cup of coffee makes a difference.
+ - bitcoin:
+ ```
+ bc1qqnkhuchxw0zqjh2ku3lu4hq45hc6gy84uk70ge
+ ```
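
A minimal inference sketch may be useful alongside the card; it is not part of this commit. The repo id is taken from the README title, and the flow is the standard `transformers` text-generation API implied by the `library_name: transformers` / `pipeline_tag: text-generation` metadata; the dtype and sampling settings are illustrative assumptions.

```python
# Hedged sketch, not the author's documented usage: load the checkpoint with a
# transformers build that supports gpt_oss (config.json records 4.56.0.dev0).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "huihui-ai/Huihui-gpt-oss-20b-BF16-abliterated"  # assumed repo id (from the README title)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches torch_dtype in config.json
    device_map="auto",
)

messages = [{"role": "user", "content": "Explain what abliteration changes in a model."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

out = model.generate(input_ids, max_new_tokens=256, do_sample=True)  # do_sample per generation_config.json
print(tokenizer.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```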
chat_template.jinja ADDED
@@ -0,0 +1,315 @@
+ {# Copyright 2025-present Unsloth. Apache 2.0 License. Unsloth chat template fixes. Edited from ggml-org & OpenAI #}
+ {#-
+ In addition to the normal inputs of `messages` and `tools`, this template also accepts the
+ following kwargs:
+ - "builtin_tools": A list, can contain "browser" and/or "python".
+ - "model_identity": A string that optionally describes the model identity.
+ - "reasoning_effort": A string that describes the reasoning effort, defaults to "medium".
+ #}
+
+ {#- Tool Definition Rendering ============================================== #}
+ {%- macro render_typescript_type(param_spec, required_params, is_nullable=false) -%}
+ {%- if param_spec.type == "array" -%}
+ {%- if param_spec['items'] -%}
+ {%- if param_spec['items']['type'] == "string" -%}
+ {{- "string[]" }}
+ {%- elif param_spec['items']['type'] == "number" -%}
+ {{- "number[]" }}
+ {%- elif param_spec['items']['type'] == "integer" -%}
+ {{- "number[]" }}
+ {%- elif param_spec['items']['type'] == "boolean" -%}
+ {{- "boolean[]" }}
+ {%- else -%}
+ {%- set inner_type = render_typescript_type(param_spec['items'], required_params) -%}
+ {%- if inner_type == "object | object" or inner_type|length > 50 -%}
+ {{- "any[]" }}
+ {%- else -%}
+ {{- inner_type + "[]" }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- if param_spec.nullable -%}
+ {{- " | null" }}
+ {%- endif -%}
+ {%- else -%}
+ {{- "any[]" }}
+ {%- if param_spec.nullable -%}
+ {{- " | null" }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- elif param_spec.type is defined and param_spec.type is iterable and param_spec.type is not string and param_spec.type is not mapping and param_spec.type[0] is defined -%}
+ {#- Handle array of types like ["object", "object"] from Union[dict, list] #}
+ {%- if param_spec.type | length > 1 -%}
+ {{- param_spec.type | join(" | ") }}
+ {%- else -%}
+ {{- param_spec.type[0] }}
+ {%- endif -%}
+ {%- elif param_spec.oneOf -%}
+ {#- Handle oneOf schemas - check for complex unions and fallback to any #}
+ {%- set has_object_variants = false -%}
+ {%- for variant in param_spec.oneOf -%}
+ {%- if variant.type == "object" -%}
+ {%- set has_object_variants = true -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if has_object_variants and param_spec.oneOf|length > 1 -%}
+ {{- "any" }}
+ {%- else -%}
+ {%- for variant in param_spec.oneOf -%}
+ {{- render_typescript_type(variant, required_params) -}}
+ {%- if variant.description %}
+ {{- "// " + variant.description }}
+ {%- endif -%}
+ {%- if variant.default is defined %}
+ {{ "// default: " + variant.default|tojson }}
+ {%- endif -%}
+ {%- if not loop.last %}
+ {{- " | " }}
+ {% endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- elif param_spec.type == "string" -%}
+ {%- if param_spec.enum -%}
+ {{- '"' + param_spec.enum|join('" | "') + '"' -}}
+ {%- else -%}
+ {{- "string" }}
+ {%- if param_spec.nullable %}
+ {{- " | null" }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- elif param_spec.type == "number" -%}
+ {{- "number" }}
+ {%- elif param_spec.type == "integer" -%}
+ {{- "number" }}
+ {%- elif param_spec.type == "boolean" -%}
+ {{- "boolean" }}
+
+ {%- elif param_spec.type == "object" -%}
+ {%- if param_spec.properties -%}
+ {{- "{\n" }}
+ {%- for prop_name, prop_spec in param_spec.properties.items() -%}
+ {{- prop_name -}}
+ {%- if prop_name not in (param_spec.required or []) -%}
+ {{- "?" }}
+ {%- endif -%}
+ {{- ": " }}
+ {{ render_typescript_type(prop_spec, param_spec.required or []) }}
+ {%- if not loop.last -%}
+ {{-", " }}
+ {%- endif -%}
+ {%- endfor -%}
+ {{- "}" }}
+ {%- else -%}
+ {{- "object" }}
+ {%- endif -%}
+ {%- else -%}
+ {{- "any" }}
+ {%- endif -%}
+ {%- endmacro -%}
+
+ {%- macro render_tool_namespace(namespace_name, tools) -%}
+ {{- "## " + namespace_name + "\n\n" }}
+ {{- "namespace " + namespace_name + " {\n\n" }}
+ {%- for tool in tools %}
+ {%- set tool = tool.function %}
+ {{- "// " + tool.description + "\n" }}
+ {{- "type "+ tool.name + " = " }}
+ {%- if tool.parameters and tool.parameters.properties -%}
+ {{- "(_: " }}
+ {{- "{\n" }}
+ {%- for param_name, param_spec in tool.parameters.properties.items() %}
+ {{- "// " + param_spec.description + "\n" }}
+ {{- param_name }}
+ {%- if param_name not in (tool.parameters.required or []) -%}
+ {{- "?" }}
+ {%- endif -%}
+ {{- ": " }}
+ {{- render_typescript_type(param_spec, tool.parameters.required or []) }}
+ {%- if param_spec.default is defined -%}
+ {%- if param_spec.enum %}
+ {{- ", // default: " + param_spec.default }}
+ {%- elif param_spec.oneOf %}
+ {{- "// default: " + param_spec.default }}
+ {%- else %}
+ {{- ", // default: " + param_spec.default|tojson }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- if not loop.last %}
+ {{- ",\n" }}
+ {%- else %}
+ {{- "\n" }}
+ {%- endif -%}
+ {%- endfor %}
+ {{- "}) => any;\n\n" }}
+ {%- else -%}
+ {{- "() => any;\n\n" }}
+ {%- endif -%}
+ {%- endfor %}
+ {{- "} // namespace " + namespace_name }}
+ {%- endmacro -%}
+
+ {%- macro render_builtin_tools(browser_tool, python_tool) -%}
+ {%- if browser_tool %}
+ {{- "## browser\n\n" }}
+ {{- "// Tool for browsing.\n" }}
+ {{- "// The `cursor` appears in brackets before each browsing display: `[{cursor}]`.\n" }}
+ {{- "// Cite information from the tool using the following format:\n" }}
+ {{- "// `【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.\n" }}
+ {{- "// Do not quote more than 10 words directly from the tool output.\n" }}
+ {{- "// sources=web (default: web)\n" }}
+ {{- "namespace browser {\n\n" }}
+ {{- "// Searches for information related to `query` and displays `topn` results.\n" }}
+ {{- "type search = (_: {\n" }}
+ {{- "query: string,\n" }}
+ {{- "topn?: number, // default: 10\n" }}
+ {{- "source?: string,\n" }}
+ {{- "}) => any;\n\n" }}
+ {{- "// Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines.\n" }}
+ {{- "// Valid link ids are displayed with the formatting: `【{id}†.*】`.\n" }}
+ {{- "// If `cursor` is not provided, the most recent page is implied.\n" }}
+ {{- "// If `id` is a string, it is treated as a fully qualified URL associated with `source`.\n" }}
+ {{- "// If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available.\n" }}
+ {{- "// Use this function without `id` to scroll to a new location of an opened page.\n" }}
+ {{- "type open = (_: {\n" }}
+ {{- "id?: number | string, // default: -1\n" }}
+ {{- "cursor?: number, // default: -1\n" }}
+ {{- "loc?: number, // default: -1\n" }}
+ {{- "num_lines?: number, // default: -1\n" }}
+ {{- "view_source?: boolean, // default: false\n" }}
+ {{- "source?: string,\n" }}
+ {{- "}) => any;\n\n" }}
+ {{- "// Finds exact matches of `pattern` in the current page, or the page given by `cursor`.\n" }}
+ {{- "type find = (_: {\n" }}
+ {{- "pattern: string,\n" }}
+ {{- "cursor?: number, // default: -1\n" }}
+ {{- "}) => any;\n\n" }}
+ {{- "} // namespace browser\n\n" }}
+ {%- endif -%}
+
+ {%- if python_tool %}
+ {{- "## python\n\n" }}
+ {{- "Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).\n\n" }}
+ {{- "When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds. The drive at '/mnt/data' can be used to save and persist user files. Internet access for this session is UNKNOWN. Depends on the cluster.\n\n" }}
+ {%- endif -%}
+ {%- endmacro -%}
+
+ {#- System Message Construction ============================================ #}
+ {%- macro build_system_message() -%}
+ {%- if model_identity is not defined %}
+ {{- "You are ChatGPT, a large language model trained by OpenAI.\n" -}}
+ {%- else %}
+ {{- model_identity }}
+ {%- endif %}
+ {{- "Knowledge cutoff: 2024-06\n" }}
+ {{- "Current date: " + strftime_now("%Y-%m-%d") + "\n\n" }}
+ {%- if reasoning_effort is not defined %}
+ {%- set reasoning_effort = "medium" %}
+ {%- endif %}
+ {{- "Reasoning: " + reasoning_effort + "\n\n" }}
+ {%- if builtin_tools is defined %}
+ {{- "# Tools\n\n" }}
+ {%- set available_builtin_tools = namespace(browser=false, python=false) %}
+ {%- for tool in builtin_tools %}
+ {%- if tool == "browser" %}
+ {%- set available_builtin_tools.browser = true %}
+ {%- elif tool == "python" %}
+ {%- set available_builtin_tools.python = true %}
+ {%- endif %}
+ {%- endfor %}
+ {{- render_builtin_tools(available_builtin_tools.browser, available_builtin_tools.python) }}
+ {%- endif -%}
+ {{- "# Valid channels: analysis, commentary, final. Channel must be included for every message." }}
+ {%- if tools is defined -%}
+ {{- "\nCalls to these tools must go to the commentary channel: 'functions'." }}
+ {%- endif -%}
+ {%- endmacro -%}
+
+ {#- Main Template Logic ================================================= #}
+ {#- Set defaults #}
+
+ {#- Render system message #}
+ {{- "<|start|>system<|message|>" }}
+ {{- build_system_message() }}
+ {{- "<|end|>" }}
+
+ {#- Extract developer message #}
+ {%- if messages[0].role == "developer" or messages[0].role == "system" %}
+ {%- set developer_message = messages[0].content %}
+ {%- set loop_messages = messages[1:] %}
+ {%- else %}
+ {%- set developer_message = "" %}
+ {%- set loop_messages = messages %}
+ {%- endif %}
+
+ {#- Render developer message #}
+ {%- if developer_message or tools %}
+ {{- "<|start|>developer<|message|>" }}
+ {%- if developer_message %}
+ {{- "# Instructions\n\n" }}
+ {{- developer_message }}
+ {%- endif %}
+ {%- if tools -%}
+ {{- "\n\n" }}
+ {{- "# Tools\n\n" }}
+ {{- render_tool_namespace("functions", tools) }}
+ {%- endif -%}
+ {{- "<|end|>" }}
+ {%- endif %}
+
+ {#- Render messages #}
+ {%- set last_tool_call = namespace(name=none) %}
+ {%- for message in loop_messages -%}
+ {#- At this point only assistant/user/tool messages should remain #}
+ {%- if message.role == 'assistant' -%}
+ {%- if "tool_calls" in message %}
+ {#- We assume max 1 tool call per message, and so we infer the tool call name #}
+ {#- in "tool" messages from the most recent assistant tool call name #}
+ {%- set tool_call = message.tool_calls[0] %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {%- if message.content %}
+ {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.content + "<|end|>" }}
+ {%- endif %}
+ {{- "<|start|>assistant to=" }}
+ {{- "functions." + tool_call.name + "<|channel|>commentary json<|message|>" }}
+ {{- tool_call.arguments|tojson }}
+ {{- "<|call|>" }}
+ {%- set last_tool_call.name = tool_call.name %}
+ {%- elif "thinking" in message and loop.last and not add_generation_prompt %}
+ {#- Only render the CoT if the final turn is an assistant turn and add_generation_prompt is false #}
+ {#- This is a situation that should only occur in training, never in inference. #}
+ {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }}
+ {#- <|return|> indicates the end of generation, but <|end|> does not #}
+ {#- <|return|> should never be an input to the model, but we include it as the final token #}
+ {#- when training, so the model learns to emit it. #}
+ {{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|return|>" }}
+ {%- set last_tool_call.name = none %}
+ {%- elif "thinking" in message %}
+ {#- CoT is dropped during all previous turns, so we never render it for inference #}
+ {{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|end|>" }}
+ {%- set last_tool_call.name = none %}
+ {%- elif loop.last and not add_generation_prompt %}
+ {#- <|return|> indicates the end of generation, but <|end|> does not #}
+ {#- <|return|> should never be an input to the model, but we include it as the final token #}
+ {#- when training, so the model learns to emit it. #}
+ {{- "<|start|>assistant<|message|>" + message.content + "<|return|>" }}
+ {%- else %}
+ {{- "<|start|>assistant<|message|>" + message.content + "<|end|>" }}
+ {%- set last_tool_call.name = none %}
+ {%- endif %}
+ {%- elif message.role == 'tool' -%}
+ {%- if last_tool_call.name is none %}
+ {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+ {%- endif %}
+ {{- "<|start|>functions." + last_tool_call.name }}
+ {{- " to=assistant<|channel|>commentary<|message|>" + message.content|tojson + "<|end|>" }}
+ {%- else -%}
+ {{- "<|start|>user<|message|>" + message.content + "<|end|>" }}
+ {%- endif -%}
+ {%- endfor -%}
+
+ {#- Generation prompt #}
+ {%- if add_generation_prompt -%}
+ <|start|>assistant
+ {%- endif -%}
+ {# Copyright 2025-present Unsloth. Apache 2.0 License. Unsloth chat template fixes. Edited from ggml-org & OpenAI #}
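
The header comments above document three extra template kwargs (`builtin_tools`, `model_identity`, `reasoning_effort`). As a hedged illustration (the repo id and all values are assumptions, not part of this commit), `apply_chat_template` forwards extra keyword arguments into the template, so these can be set per call:

```python
# Sketch: render the prompt to a string to inspect what the template emits.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("huihui-ai/Huihui-gpt-oss-20b-BF16-abliterated")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,   # appends "<|start|>assistant" (see the template's final block)
    reasoning_effort="high",      # rendered as "Reasoning: high"; defaults to "medium"
    model_identity="You are a helpful assistant.",  # replaces the default identity line
    builtin_tools=["python"],     # advertises the built-in python tool in the system message
)
print(prompt)  # begins with "<|start|>system<|message|>..."
```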
config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "architectures": [
+     "GptOssForCausalLM"
+   ],
+   "attention_bias": true,
+   "attention_dropout": 0.0,
+   "eos_token_id": 200002,
+   "experts_per_token": 4,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 2880,
+   "initial_context_length": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 2880,
+   "layer_types": [
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 131072,
+   "model_type": "gpt_oss",
+   "num_attention_heads": 64,
+   "num_experts_per_tok": 4,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 8,
+   "num_local_experts": 32,
+   "output_router_logits": false,
+   "pad_token_id": 199999,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "beta_fast": 32.0,
+     "beta_slow": 1.0,
+     "factor": 32.0,
+     "original_max_position_embeddings": 4096,
+     "rope_type": "yarn",
+     "truncate": false
+   },
+   "rope_theta": 150000,
+   "router_aux_loss_coef": 0.9,
+   "sliding_window": 128,
+   "swiglu_limit": 7.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.56.0.dev0",
+   "use_cache": true,
+   "vocab_size": 201088
+ }
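
A small inspection sketch, under the assumption that the installed `transformers` recognizes the `gpt_oss` architecture (the config was written by 4.56.0.dev0) and that the repo id from the README resolves; the asserted values are copied from the file above:

```python
# Sketch: load the config and confirm the MoE/attention layout declared above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("huihui-ai/Huihui-gpt-oss-20b-BF16-abliterated")
assert cfg.model_type == "gpt_oss"
assert cfg.num_local_experts == 32 and cfg.num_experts_per_tok == 4  # 4 of 32 experts active per token
assert cfg.num_hidden_layers == 24 and len(cfg.layer_types) == 24
print(cfg.layer_types[:4])  # alternating sliding/full attention, sliding_window = 128
```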
generation_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "bos_token_id": 199998,
+   "do_sample": true,
+   "eos_token_id": [
+     200002,
+     199999
+   ],
+   "pad_token_id": 199999,
+   "transformers_version": "4.56.0.dev0"
+ }
model-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88868ce20327a95a8f2d3ba66c0833c2d6544fe04349ac2a1eb8971853d1dbe8
+ size 4504304664
model-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a38d5331c0a5425e7fce1a7b2f03a3858d5f7e27ee1a86db6103f8192331b3d4
+ size 4939127656
model-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e397b49bdf236be257a728243afa19ad96ced9ff0e1e4e49f33991bcd1bd883d
+ size 4939127656
model-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d42f79dbeee64c434203e45baadcaa8642e4fe2e7ae9192a9da5414803e6543
+ size 4939127680
model-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c952d81437079677e0ad5ed816bd69ccfc96ba1f6bcc81eb671b8ad18dddf48
+ size 4939127704
model-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:761e2dcff304c71d06ae372b83b925ec718a0335a652dacb2f16da07f8ed836b
+ size 4939127704
model-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34e28f9a595921f46f62d35a7f8bb140b39384922c178316cbcce43172346d34
+ size 4939127704
model-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0542dfb0fad73408a87194159bb0715e067f3f1b650d359249acbf317784ba5
+ size 4939127704
model-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9eb975c2cceb4aedf26218085ddee293728654c83bbae85b7704b22ed148e5a7
+ size 2751362856
model.safetensors.index.json ADDED
@@ -0,0 +1,419 @@
+ {
+   "metadata": {
+     "total_parameters": 20914757184,
+     "total_size": 41829514368
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00009-of-00009.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00009.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00009.safetensors",
+     "model.layers.0.mlp.experts.down_proj": "model-00001-of-00009.safetensors",
+     "model.layers.0.mlp.experts.down_proj_bias": "model-00001-of-00009.safetensors",
+     "model.layers.0.mlp.experts.gate_up_proj": "model-00001-of-00009.safetensors",
+     "model.layers.0.mlp.experts.gate_up_proj_bias": "model-00001-of-00009.safetensors",
+     "model.layers.0.mlp.router.bias": "model-00001-of-00009.safetensors",
+     "model.layers.0.mlp.router.weight": "model-00001-of-00009.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.o_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.sinks": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00009.safetensors",
+     "model.layers.1.mlp.experts.down_proj": "model-00001-of-00009.safetensors",
+     "model.layers.1.mlp.experts.down_proj_bias": "model-00001-of-00009.safetensors",
+     "model.layers.1.mlp.experts.gate_up_proj": "model-00001-of-00009.safetensors",
+     "model.layers.1.mlp.experts.gate_up_proj_bias": "model-00001-of-00009.safetensors",
+     "model.layers.1.mlp.router.bias": "model-00001-of-00009.safetensors",
+     "model.layers.1.mlp.router.weight": "model-00001-of-00009.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.o_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.sinks": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00004-of-00009.safetensors",
+     "model.layers.10.mlp.experts.down_proj": "model-00004-of-00009.safetensors",
+     "model.layers.10.mlp.experts.down_proj_bias": "model-00004-of-00009.safetensors",
+     "model.layers.10.mlp.experts.gate_up_proj": "model-00004-of-00009.safetensors",
+     "model.layers.10.mlp.experts.gate_up_proj_bias": "model-00004-of-00009.safetensors",
+     "model.layers.10.mlp.router.bias": "model-00004-of-00009.safetensors",
+     "model.layers.10.mlp.router.weight": "model-00004-of-00009.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.o_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.sinks": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00005-of-00009.safetensors",
+     "model.layers.11.mlp.experts.down_proj": "model-00005-of-00009.safetensors",
+     "model.layers.11.mlp.experts.down_proj_bias": "model-00005-of-00009.safetensors",
+     "model.layers.11.mlp.experts.gate_up_proj": "model-00005-of-00009.safetensors",
+     "model.layers.11.mlp.experts.gate_up_proj_bias": "model-00005-of-00009.safetensors",
+     "model.layers.11.mlp.router.bias": "model-00004-of-00009.safetensors",
+     "model.layers.11.mlp.router.weight": "model-00004-of-00009.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
+     "model.layers.11.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.o_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.sinks": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00005-of-00009.safetensors",
+     "model.layers.12.mlp.experts.down_proj": "model-00005-of-00009.safetensors",
+     "model.layers.12.mlp.experts.down_proj_bias": "model-00005-of-00009.safetensors",
+     "model.layers.12.mlp.experts.gate_up_proj": "model-00005-of-00009.safetensors",
+     "model.layers.12.mlp.experts.gate_up_proj_bias": "model-00005-of-00009.safetensors",
+     "model.layers.12.mlp.router.bias": "model-00005-of-00009.safetensors",
+     "model.layers.12.mlp.router.weight": "model-00005-of-00009.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.o_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.sinks": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00005-of-00009.safetensors",
+     "model.layers.13.mlp.experts.down_proj": "model-00005-of-00009.safetensors",
+     "model.layers.13.mlp.experts.down_proj_bias": "model-00005-of-00009.safetensors",
+     "model.layers.13.mlp.experts.gate_up_proj": "model-00005-of-00009.safetensors",
+     "model.layers.13.mlp.experts.gate_up_proj_bias": "model-00005-of-00009.safetensors",
+     "model.layers.13.mlp.router.bias": "model-00005-of-00009.safetensors",
+     "model.layers.13.mlp.router.weight": "model-00005-of-00009.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.o_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.sinks": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00006-of-00009.safetensors",
+     "model.layers.14.mlp.experts.down_proj": "model-00006-of-00009.safetensors",
+     "model.layers.14.mlp.experts.down_proj_bias": "model-00006-of-00009.safetensors",
+     "model.layers.14.mlp.experts.gate_up_proj": "model-00006-of-00009.safetensors",
+     "model.layers.14.mlp.experts.gate_up_proj_bias": "model-00006-of-00009.safetensors",
+     "model.layers.14.mlp.router.bias": "model-00005-of-00009.safetensors",
+     "model.layers.14.mlp.router.weight": "model-00005-of-00009.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
+     "model.layers.14.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.o_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.sinks": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00006-of-00009.safetensors",
+     "model.layers.15.mlp.experts.down_proj": "model-00006-of-00009.safetensors",
+     "model.layers.15.mlp.experts.down_proj_bias": "model-00006-of-00009.safetensors",
+     "model.layers.15.mlp.experts.gate_up_proj": "model-00006-of-00009.safetensors",
+     "model.layers.15.mlp.experts.gate_up_proj_bias": "model-00006-of-00009.safetensors",
+     "model.layers.15.mlp.router.bias": "model-00006-of-00009.safetensors",
+     "model.layers.15.mlp.router.weight": "model-00006-of-00009.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.o_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.sinks": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00006-of-00009.safetensors",
+     "model.layers.16.mlp.experts.down_proj": "model-00006-of-00009.safetensors",
+     "model.layers.16.mlp.experts.down_proj_bias": "model-00006-of-00009.safetensors",
+     "model.layers.16.mlp.experts.gate_up_proj": "model-00006-of-00009.safetensors",
+     "model.layers.16.mlp.experts.gate_up_proj_bias": "model-00006-of-00009.safetensors",
+     "model.layers.16.mlp.router.bias": "model-00006-of-00009.safetensors",
+     "model.layers.16.mlp.router.weight": "model-00006-of-00009.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.o_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.sinks": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00007-of-00009.safetensors",
+     "model.layers.17.mlp.experts.down_proj": "model-00007-of-00009.safetensors",
+     "model.layers.17.mlp.experts.down_proj_bias": "model-00007-of-00009.safetensors",
+     "model.layers.17.mlp.experts.gate_up_proj": "model-00007-of-00009.safetensors",
+     "model.layers.17.mlp.experts.gate_up_proj_bias": "model-00007-of-00009.safetensors",
+     "model.layers.17.mlp.router.bias": "model-00006-of-00009.safetensors",
+     "model.layers.17.mlp.router.weight": "model-00006-of-00009.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
+     "model.layers.17.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.o_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.sinks": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00007-of-00009.safetensors",
+     "model.layers.18.mlp.experts.down_proj": "model-00007-of-00009.safetensors",
+     "model.layers.18.mlp.experts.down_proj_bias": "model-00007-of-00009.safetensors",
+     "model.layers.18.mlp.experts.gate_up_proj": "model-00007-of-00009.safetensors",
+     "model.layers.18.mlp.experts.gate_up_proj_bias": "model-00007-of-00009.safetensors",
+     "model.layers.18.mlp.router.bias": "model-00007-of-00009.safetensors",
+     "model.layers.18.mlp.router.weight": "model-00007-of-00009.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.o_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.sinks": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00007-of-00009.safetensors",
+     "model.layers.19.mlp.experts.down_proj": "model-00007-of-00009.safetensors",
+     "model.layers.19.mlp.experts.down_proj_bias": "model-00007-of-00009.safetensors",
+     "model.layers.19.mlp.experts.gate_up_proj": "model-00007-of-00009.safetensors",
+     "model.layers.19.mlp.experts.gate_up_proj_bias": "model-00007-of-00009.safetensors",
+     "model.layers.19.mlp.router.bias": "model-00007-of-00009.safetensors",
+     "model.layers.19.mlp.router.weight": "model-00007-of-00009.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.o_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.sinks": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00002-of-00009.safetensors",
+     "model.layers.2.mlp.experts.down_proj": "model-00002-of-00009.safetensors",
+     "model.layers.2.mlp.experts.down_proj_bias": "model-00002-of-00009.safetensors",
+     "model.layers.2.mlp.experts.gate_up_proj": "model-00002-of-00009.safetensors",
+     "model.layers.2.mlp.experts.gate_up_proj_bias": "model-00002-of-00009.safetensors",
+     "model.layers.2.mlp.router.bias": "model-00001-of-00009.safetensors",
+     "model.layers.2.mlp.router.weight": "model-00001-of-00009.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
+     "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.o_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.sinks": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00008-of-00009.safetensors",
+     "model.layers.20.mlp.experts.down_proj": "model-00008-of-00009.safetensors",
+     "model.layers.20.mlp.experts.down_proj_bias": "model-00008-of-00009.safetensors",
+     "model.layers.20.mlp.experts.gate_up_proj": "model-00008-of-00009.safetensors",
+     "model.layers.20.mlp.experts.gate_up_proj_bias": "model-00008-of-00009.safetensors",
+     "model.layers.20.mlp.router.bias": "model-00007-of-00009.safetensors",
+     "model.layers.20.mlp.router.weight": "model-00007-of-00009.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
+     "model.layers.20.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.o_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.sinks": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00008-of-00009.safetensors",
+     "model.layers.21.mlp.experts.down_proj": "model-00008-of-00009.safetensors",
+     "model.layers.21.mlp.experts.down_proj_bias": "model-00008-of-00009.safetensors",
+     "model.layers.21.mlp.experts.gate_up_proj": "model-00008-of-00009.safetensors",
+     "model.layers.21.mlp.experts.gate_up_proj_bias": "model-00008-of-00009.safetensors",
+     "model.layers.21.mlp.router.bias": "model-00008-of-00009.safetensors",
+     "model.layers.21.mlp.router.weight": "model-00008-of-00009.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.o_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.sinks": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00008-of-00009.safetensors",
+     "model.layers.22.mlp.experts.down_proj": "model-00008-of-00009.safetensors",
+     "model.layers.22.mlp.experts.down_proj_bias": "model-00008-of-00009.safetensors",
+     "model.layers.22.mlp.experts.gate_up_proj": "model-00008-of-00009.safetensors",
+     "model.layers.22.mlp.experts.gate_up_proj_bias": "model-00008-of-00009.safetensors",
+     "model.layers.22.mlp.router.bias": "model-00008-of-00009.safetensors",
+     "model.layers.22.mlp.router.weight": "model-00008-of-00009.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.o_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.sinks": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00009-of-00009.safetensors",
+     "model.layers.23.mlp.experts.down_proj": "model-00009-of-00009.safetensors",
+     "model.layers.23.mlp.experts.down_proj_bias": "model-00009-of-00009.safetensors",
+     "model.layers.23.mlp.experts.gate_up_proj": "model-00009-of-00009.safetensors",
+     "model.layers.23.mlp.experts.gate_up_proj_bias": "model-00009-of-00009.safetensors",
+     "model.layers.23.mlp.router.bias": "model-00008-of-00009.safetensors",
+     "model.layers.23.mlp.router.weight": "model-00008-of-00009.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
+     "model.layers.23.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.o_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.sinks": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00002-of-00009.safetensors",
+     "model.layers.3.mlp.experts.down_proj": "model-00002-of-00009.safetensors",
+     "model.layers.3.mlp.experts.down_proj_bias": "model-00002-of-00009.safetensors",
+     "model.layers.3.mlp.experts.gate_up_proj": "model-00002-of-00009.safetensors",
+     "model.layers.3.mlp.experts.gate_up_proj_bias": "model-00002-of-00009.safetensors",
+     "model.layers.3.mlp.router.bias": "model-00002-of-00009.safetensors",
+     "model.layers.3.mlp.router.weight": "model-00002-of-00009.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.o_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.sinks": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00002-of-00009.safetensors",
+     "model.layers.4.mlp.experts.down_proj": "model-00002-of-00009.safetensors",
+     "model.layers.4.mlp.experts.down_proj_bias": "model-00002-of-00009.safetensors",
+     "model.layers.4.mlp.experts.gate_up_proj": "model-00002-of-00009.safetensors",
+     "model.layers.4.mlp.experts.gate_up_proj_bias": "model-00002-of-00009.safetensors",
+     "model.layers.4.mlp.router.bias": "model-00002-of-00009.safetensors",
+     "model.layers.4.mlp.router.weight": "model-00002-of-00009.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.o_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.sinks": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00003-of-00009.safetensors",
+     "model.layers.5.mlp.experts.down_proj": "model-00003-of-00009.safetensors",
+     "model.layers.5.mlp.experts.down_proj_bias": "model-00003-of-00009.safetensors",
+     "model.layers.5.mlp.experts.gate_up_proj": "model-00003-of-00009.safetensors",
+     "model.layers.5.mlp.experts.gate_up_proj_bias": "model-00003-of-00009.safetensors",
+     "model.layers.5.mlp.router.bias": "model-00002-of-00009.safetensors",
+     "model.layers.5.mlp.router.weight": "model-00002-of-00009.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
+     "model.layers.5.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.o_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.sinks": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00003-of-00009.safetensors",
+     "model.layers.6.mlp.experts.down_proj": "model-00003-of-00009.safetensors",
+     "model.layers.6.mlp.experts.down_proj_bias": "model-00003-of-00009.safetensors",
+     "model.layers.6.mlp.experts.gate_up_proj": "model-00003-of-00009.safetensors",
+     "model.layers.6.mlp.experts.gate_up_proj_bias": "model-00003-of-00009.safetensors",
+     "model.layers.6.mlp.router.bias": "model-00003-of-00009.safetensors",
+     "model.layers.6.mlp.router.weight": "model-00003-of-00009.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.o_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.sinks": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00003-of-00009.safetensors",
+     "model.layers.7.mlp.experts.down_proj": "model-00003-of-00009.safetensors",
+     "model.layers.7.mlp.experts.down_proj_bias": "model-00003-of-00009.safetensors",
+     "model.layers.7.mlp.experts.gate_up_proj": "model-00003-of-00009.safetensors",
+     "model.layers.7.mlp.experts.gate_up_proj_bias": "model-00003-of-00009.safetensors",
+     "model.layers.7.mlp.router.bias": "model-00003-of-00009.safetensors",
+     "model.layers.7.mlp.router.weight": "model-00003-of-00009.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.o_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.sinks": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00004-of-00009.safetensors",
+     "model.layers.8.mlp.experts.down_proj": "model-00004-of-00009.safetensors",
+     "model.layers.8.mlp.experts.down_proj_bias": "model-00004-of-00009.safetensors",
+     "model.layers.8.mlp.experts.gate_up_proj": "model-00004-of-00009.safetensors",
+     "model.layers.8.mlp.experts.gate_up_proj_bias": "model-00004-of-00009.safetensors",
+     "model.layers.8.mlp.router.bias": "model-00003-of-00009.safetensors",
+     "model.layers.8.mlp.router.weight": "model-00003-of-00009.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
+     "model.layers.8.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.o_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.sinks": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00004-of-00009.safetensors",
+     "model.layers.9.mlp.experts.down_proj": "model-00004-of-00009.safetensors",
+     "model.layers.9.mlp.experts.down_proj_bias": "model-00004-of-00009.safetensors",
+     "model.layers.9.mlp.experts.gate_up_proj": "model-00004-of-00009.safetensors",
+     "model.layers.9.mlp.experts.gate_up_proj_bias": "model-00004-of-00009.safetensors",
+     "model.layers.9.mlp.router.bias": "model-00004-of-00009.safetensors",
+     "model.layers.9.mlp.router.weight": "model-00004-of-00009.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.o_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.sinks": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
+     "model.norm.weight": "model-00009-of-00009.safetensors"
+   }
+ }
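
The metadata supports a quick consistency check: config.json records bfloat16 storage (2 bytes per parameter), and `total_parameters * 2` equals `total_size` exactly. The nine shard sizes listed above sum to slightly more than `total_size`, which is consistent with each file carrying its own safetensors header.

```python
# Consistency check on the index metadata (bf16 = 2 bytes per parameter).
total_parameters = 20_914_757_184
total_size = 41_829_514_368
assert total_parameters * 2 == total_size
```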
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|return|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0614fe83cadab421296e664e1f48f4261fa8fef6e03e63bb75c20f38e37d07d3
+ size 27868174
tokenizer_config.json ADDED
@@ -0,0 +1,183 @@
+ {
+   "added_tokens_decoder": {
+     "199998": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "199999": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200000": {
+       "content": "<|reserved_200000|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200001": {
+       "content": "<|reserved_200001|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200002": {
+       "content": "<|return|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200003": {
+       "content": "<|constrain|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200004": {
+       "content": "<|reserved_200004|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200005": {
+       "content": "<|channel|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200006": {
+       "content": "<|start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200007": {
+       "content": "<|end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200008": {
+       "content": "<|message|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200009": {
+       "content": "<|reserved_200009|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200010": {
+       "content": "<|reserved_200010|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200011": {
+       "content": "<|reserved_200011|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200012": {
+       "content": "<|call|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200013": {
+       "content": "<|reserved_200013|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200014": {
+       "content": "<|reserved_200014|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200015": {
+       "content": "<|reserved_200015|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200016": {
+       "content": "<|reserved_200016|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200017": {
+       "content": "<|reserved_200017|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200018": {
+       "content": "<|endofprompt|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|return|>",
+   "extra_special_tokens": {},
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
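
Finally, a hedged sketch tying the tokenizer files together (the repo id is assumed from the README): the harmony-format control tokens declared in `added_tokens_decoder` resolve to the ids the chat template relies on, and the eos/pad choices match generation_config.json.

```python
# Sketch: resolve the control tokens declared above to their ids.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("huihui-ai/Huihui-gpt-oss-20b-BF16-abliterated")
for t in ["<|start|>", "<|channel|>", "<|message|>", "<|end|>", "<|return|>", "<|call|>"]:
    print(t, tok.convert_tokens_to_ids(t))
# Expected per added_tokens_decoder: 200006, 200005, 200008, 200007, 200002, 200012.
# <|return|> (200002) is the eos token and <|endoftext|> (199999) the pad token,
# matching eos_token_id/pad_token_id in generation_config.json.
```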