Upload folder using huggingface_hub
- .gitattributes +1 -0
- README.md +144 -0
- chat_template.jinja +397 -0
- config.json +68 -0
- generation_config.json +10 -0
- model-00001-of-00009.safetensors +3 -0
- model-00002-of-00009.safetensors +3 -0
- model-00003-of-00009.safetensors +3 -0
- model-00004-of-00009.safetensors +3 -0
- model-00005-of-00009.safetensors +3 -0
- model-00006-of-00009.safetensors +3 -0
- model-00007-of-00009.safetensors +3 -0
- model-00008-of-00009.safetensors +3 -0
- model-00009-of-00009.safetensors +3 -0
- model.safetensors.index.json +419 -0
- special_tokens_map.json +23 -0
- tokenizer.json +3 -0
- tokenizer_config.json +183 -0
- training_args.bin +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,144 @@
+---
+library_name: transformers
+license: apache-2.0
+base_model: openai/gpt-oss-20b
+tags:
+- generated_from_trainer
+datasets:
+- HuggingFaceH4/Multilingual-Thinking
+model-index:
+- name: outputs/gpt-oss-out-fft/
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
+<details><summary>See axolotl config</summary>
+
+axolotl version: `0.12.0.dev0`
+```yaml
+base_model: openai/gpt-oss-20b
+use_kernels: true
+model_quantization_config: Mxfp4Config
+model_quantization_config_kwargs:
+  dequantize: true
+  block_size: 32 # default, matches the OCP spec
+  strict: false # fall back to fp16 on any odd layers
+
+plugins:
+  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
+
+experimental_skip_move_to_device: true # prevent OOM by NOT putting the model on GPU before sharding
+
+datasets:
+  # - path: winglian/pirate-ultrachat-10k
+  #   type: chat_template
+  #   split: train
+  - message_property_mappings:
+      content: content
+      role: role
+    path: HuggingFaceH4/Multilingual-Thinking
+    trust_remote_code: false
+    field_messages: messages
+    type: chat_template
+
+dataset_prepared_path: last_run_prepared
+val_set_size: 0
+output_dir: ./outputs/gpt-oss-out-fft/
+
+sequence_len: 8192
+sample_packing: true
+
+# adapter: lora
+# lora_r: 8
+# lora_alpha: 16
+# lora_dropout: 0.0
+# lora_target_linear: true
+
+wandb_project: gpt-oss-20b
+wandb_name: multilingual-reasoning
+
+gradient_accumulation_steps: 1
+micro_batch_size: 2
+num_epochs: 1
+
+optimizer: adamw_torch_8bit
+lr_scheduler: constant_with_warmup
+learning_rate: 2e-5
+
+bf16: true
+tf32: true
+
+flash_attention: true
+attn_implementation: kernels-community/vllm-flash-attn3
+
+gradient_checkpointing: true
+activation_offloading: true
+
+logging_steps: 1
+saves_per_epoch: 1
+
+warmup_ratio: 0.1
+
+special_tokens:
+eot_tokens:
+  - "<|end|>"
+
+fsdp_version: 2
+fsdp_config:
+  offload_params: false
+  state_dict_type: SHARDED_STATE_DICT
+  auto_wrap_policy: TRANSFORMER_BASED_WRAP
+  transformer_layer_cls_to_wrap: GptOssDecoderLayer
+  reshard_after_forward: true
+
+
+```
+
+</details><br>
+
+# outputs/gpt-oss-out-fft/
+
+This model is a fine-tuned version of [openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b) on the HuggingFaceH4/Multilingual-Thinking dataset.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 2e-05
+- train_batch_size: 2
+- eval_batch_size: 2
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 8
+- total_train_batch_size: 16
+- total_eval_batch_size: 16
+- optimizer: OptimizerNames.ADAMW_TORCH_8BIT with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+- lr_scheduler_type: constant_with_warmup
+- training_steps: 8
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.55.0
+- Pytorch 2.8.0+cu128
+- Datasets 4.0.0
+- Tokenizers 0.21.4
chat_template.jinja
ADDED
@@ -0,0 +1,397 @@
+{#-
+    In addition to the normal inputs of `messages` and `tools`, this template also accepts the
+    following kwargs:
+    - "builtin_tools": A list, can contain "browser" and/or "python".
+    - "model_identity": A string that optionally describes the model identity.
+    - "reasoning_effort": A string that describes the reasoning effort, defaults to "medium".
+#}
+
+{#- Tool Definition Rendering ============================================== #}
+{%- macro render_typescript_type(param_spec, required_params, is_nullable=false) -%}
+    {%- if param_spec.type == "array" -%}
+        {%- if param_spec['items'] -%}
+            {%- if param_spec['items']['type'] == "string" -%}
+                {{- "string[]" }}
+            {%- elif param_spec['items']['type'] == "number" -%}
+                {{- "number[]" }}
+            {%- elif param_spec['items']['type'] == "integer" -%}
+                {{- "number[]" }}
+            {%- elif param_spec['items']['type'] == "boolean" -%}
+                {{- "boolean[]" }}
+            {%- else -%}
+                {%- set inner_type = render_typescript_type(param_spec['items'], required_params) -%}
+                {%- if inner_type == "object | object" or inner_type|length > 50 -%}
+                    {{- "any[]" }}
+                {%- else -%}
+                    {{- inner_type + "[]" }}
+                {%- endif -%}
+            {%- endif -%}
+            {%- if param_spec.nullable -%}
+                {{- " | null" }}
+            {%- endif -%}
+        {%- else -%}
+            {{- "any[]" }}
+            {%- if param_spec.nullable -%}
+                {{- " | null" }}
+            {%- endif -%}
+        {%- endif -%}
+    {%- elif param_spec.type is defined and param_spec.type is iterable and param_spec.type is not string and param_spec.type is not mapping and param_spec.type[0] is defined -%}
+        {#- Handle array of types like ["object", "object"] from Union[dict, list] #}
+        {%- if param_spec.type | length > 1 -%}
+            {{- param_spec.type | join(" | ") }}
+        {%- else -%}
+            {{- param_spec.type[0] }}
+        {%- endif -%}
+    {%- elif param_spec.oneOf -%}
+        {#- Handle oneOf schemas - check for complex unions and fallback to any #}
+        {%- set has_object_variants = false -%}
+        {%- for variant in param_spec.oneOf -%}
+            {%- if variant.type == "object" -%}
+                {%- set has_object_variants = true -%}
+            {%- endif -%}
+        {%- endfor -%}
+        {%- if has_object_variants and param_spec.oneOf|length > 1 -%}
+            {{- "any" }}
+        {%- else -%}
+            {%- for variant in param_spec.oneOf -%}
+                {{- render_typescript_type(variant, required_params) -}}
+                {%- if variant.description %}
+                    {{- "// " + variant.description }}
+                {%- endif -%}
+                {%- if variant.default is defined %}
+                    {{ "// default: " + variant.default|tojson }}
+                {%- endif -%}
+                {%- if not loop.last %}
+                    {{- " | " }}
+                {% endif -%}
+            {%- endfor -%}
+        {%- endif -%}
+    {%- elif param_spec.type == "string" -%}
+        {%- if param_spec.enum -%}
+            {{- '"' + param_spec.enum|join('" | "') + '"' -}}
+        {%- else -%}
+            {{- "string" }}
+            {%- if param_spec.nullable %}
+                {{- " | null" }}
+            {%- endif -%}
+        {%- endif -%}
+    {%- elif param_spec.type == "number" -%}
+        {{- "number" }}
+    {%- elif param_spec.type == "integer" -%}
+        {{- "number" }}
+    {%- elif param_spec.type == "boolean" -%}
+        {{- "boolean" }}
+
+    {%- elif param_spec.type == "object" -%}
+        {%- if param_spec.properties -%}
+            {{- "{
+" }}
+            {%- for prop_name, prop_spec in param_spec.properties.items() -%}
+                {{- prop_name -}}
+                {%- if prop_name not in (param_spec.required or []) -%}
+                    {{- "?" }}
+                {%- endif -%}
+                {{- ": " }}
+                {{ render_typescript_type(prop_spec, param_spec.required or []) }}
+                {%- if not loop.last -%}
+                    {{-", " }}
+                {%- endif -%}
+            {%- endfor -%}
+            {{- "}" }}
+        {%- else -%}
+            {{- "object" }}
+        {%- endif -%}
+    {%- else -%}
+        {{- "any" }}
+    {%- endif -%}
+{%- endmacro -%}
+
+{%- macro render_tool_namespace(namespace_name, tools) -%}
+    {{- "## " + namespace_name + "
+
+" }}
+    {{- "namespace " + namespace_name + " {
+
+" }}
+    {%- for tool in tools %}
+        {%- set tool = tool.function %}
+        {{- "// " + tool.description + "
+" }}
+        {{- "type "+ tool.name + " = " }}
+        {%- if tool.parameters and tool.parameters.properties %}
+            {{- "(_: {
+" }}
+            {%- for param_name, param_spec in tool.parameters.properties.items() %}
+                {%- if param_spec.description %}
+                    {{- "// " + param_spec.description + "
+" }}
+                {%- endif %}
+                {{- param_name }}
+                {%- if param_name not in (tool.parameters.required or []) -%}
+                    {{- "?" }}
+                {%- endif -%}
+                {{- ": " }}
+                {{- render_typescript_type(param_spec, tool.parameters.required or []) }}
+                {%- if param_spec.default is defined -%}
+                    {%- if param_spec.enum %}
+                        {{- ", // default: " + param_spec.default }}
+                    {%- elif param_spec.oneOf %}
+                        {{- "// default: " + param_spec.default }}
+                    {%- else %}
+                        {{- ", // default: " + param_spec.default|tojson }}
+                    {%- endif -%}
+                {%- endif -%}
+                {%- if not loop.last %}
+                    {{- ",
+" }}
+                {%- else %}
+                    {{- "
+" }}
+                {%- endif -%}
+            {%- endfor %}
+            {{- "}) => any;
+
+" }}
+        {%- else -%}
+            {{- "() => any;
+
+" }}
+        {%- endif -%}
+    {%- endfor %}
+    {{- "} // namespace " + namespace_name }}
+{%- endmacro -%}
+
+{%- macro render_builtin_tools(browser_tool, python_tool) -%}
+    {%- if browser_tool %}
+        {{- "## browser
+
+" }}
+        {{- "// Tool for browsing.
+" }}
+        {{- "// The `cursor` appears in brackets before each browsing display: `[{cursor}]`.
+" }}
+        {{- "// Cite information from the tool using the following format:
+" }}
+        {{- "// `【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.
+" }}
+        {{- "// Do not quote more than 10 words directly from the tool output.
+" }}
+        {{- "// sources=web (default: web)
+" }}
+        {{- "namespace browser {
+
+" }}
+        {{- "// Searches for information related to `query` and displays `topn` results.
+" }}
+        {{- "type search = (_: {
+" }}
+        {{- "query: string,
+" }}
+        {{- "topn?: number, // default: 10
+" }}
+        {{- "source?: string,
+" }}
+        {{- "}) => any;
+
+" }}
+        {{- "// Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines.
+" }}
+        {{- "// Valid link ids are displayed with the formatting: `【{id}†.*】`.
+" }}
+        {{- "// If `cursor` is not provided, the most recent page is implied.
+" }}
+        {{- "// If `id` is a string, it is treated as a fully qualified URL associated with `source`.
+" }}
+        {{- "// If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available.
+" }}
+        {{- "// Use this function without `id` to scroll to a new location of an opened page.
+" }}
+        {{- "type open = (_: {
+" }}
+        {{- "id?: number | string, // default: -1
+" }}
+        {{- "cursor?: number, // default: -1
+" }}
+        {{- "loc?: number, // default: -1
+" }}
+        {{- "num_lines?: number, // default: -1
+" }}
+        {{- "view_source?: boolean, // default: false
+" }}
+        {{- "source?: string,
+" }}
+        {{- "}) => any;
+
+" }}
+        {{- "// Finds exact matches of `pattern` in the current page, or the page given by `cursor`.
+" }}
+        {{- "type find = (_: {
+" }}
+        {{- "pattern: string,
+" }}
+        {{- "cursor?: number, // default: -1
+" }}
+        {{- "}) => any;
+
+" }}
+        {{- "} // namespace browser
+
+" }}
+    {%- endif -%}
+
+    {%- if python_tool %}
+        {{- "## python
+
+" }}
+        {{- "Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
+
+" }}
+        {{- "When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds. The drive at '/mnt/data' can be used to save and persist user files. Internet access for this session is UNKNOWN. Depends on the cluster.
+
+" }}
+    {%- endif -%}
+{%- endmacro -%}
+
+{#- System Message Construction ============================================ #}
+{%- macro build_system_message() -%}
+    {%- if model_identity is not defined %}
+        {%- set model_identity = "You are ChatGPT, a large language model trained by OpenAI." %}
+    {%- endif %}
+    {{- model_identity + "
+" }}
+    {{- "Knowledge cutoff: 2024-06
+" }}
+    {{- "Current date: " + strftime_now("%Y-%m-%d") + "
+
+" }}
+    {%- if reasoning_effort is not defined %}
+        {%- set reasoning_effort = "medium" %}
+    {%- endif %}
+    {{- "Reasoning: " + reasoning_effort + "
+
+" }}
+    {%- if builtin_tools %}
+        {{- "# Tools
+
+" }}
+        {%- set available_builtin_tools = namespace(browser=false, python=false) %}
+        {%- for tool in builtin_tools %}
+            {%- if tool == "browser" %}
+                {%- set available_builtin_tools.browser = true %}
+            {%- elif tool == "python" %}
+                {%- set available_builtin_tools.python = true %}
+            {%- endif %}
+        {%- endfor %}
+        {{- render_builtin_tools(available_builtin_tools.browser, available_builtin_tools.python) }}
+    {%- endif -%}
+    {{- "# Valid channels: analysis, commentary, final. Channel must be included for every message." }}
+    {%- if tools -%}
+        {{- "
+Calls to these tools must go to the commentary channel: 'functions'." }}
+    {%- endif -%}
+{%- endmacro -%}
+
+{#- Main Template Logic ================================================= #}
+{#- Set defaults #}
+
+{#- Render system message #}
+{{- "<|start|>system<|message|>" }}
+{{- build_system_message() }}
+{{- "<|end|>" }}
+
+{#- Extract developer message #}
+{%- if messages[0].role == "developer" or messages[0].role == "system" %}
+    {%- set developer_message = messages[0].content %}
+    {%- set loop_messages = messages[1:] %}
+{%- else %}
+    {%- set developer_message = "" %}
+    {%- set loop_messages = messages %}
+{%- endif %}
+
+{#- Render developer message #}
+{%- if developer_message or tools %}
+    {{- "<|start|>developer<|message|>" }}
+    {%- if developer_message %}
+        {{- "# Instructions
+
+" }}
+        {{- developer_message }}
+    {%- endif %}
+    {%- if tools -%}
+        {{- "
+
+" }}
+        {{- "# Tools
+
+" }}
+        {{- render_tool_namespace("functions", tools) }}
+    {%- endif -%}
+    {{- "<|end|>" }}
+{%- endif %}
+
+{#- Render messages #}
+{%- set last_tool_call = namespace(name=none) %}
+{%- for message in loop_messages -%}
+    {#- At this point only assistant/user/tool messages should remain #}
+    {%- if message.role == 'assistant' -%}
+        {#- Checks to ensure the messages are being passed in the format we expect #}
+        {%- if "content" in message %}
+            {%- if "<|channel|>analysis<|message|>" in message.content or "<|channel|>final<|message|>" in message.content %}
+                {{- raise_exception("You have passed a message containing <|channel|> tags in the content field. Instead of doing this, you should pass analysis messages (the string between '<|message|>' and '<|end|>') in the 'thinking' field, and final messages (the string between '<|message|>' and '<|end|>') in the 'content' field.") }}
+            {%- endif %}
+        {%- endif %}
+        {%- if "thinking" in message %}
+            {%- if "<|channel|>analysis<|message|>" in message.thinking or "<|channel|>final<|message|>" in message.thinking %}
+                {{- raise_exception("You have passed a message containing <|channel|> tags in the thinking field. Instead of doing this, you should pass analysis messages (the string between '<|message|>' and '<|end|>') in the 'thinking' field, and final messages (the string between '<|message|>' and '<|end|>') in the 'content' field.") }}
+            {%- endif %}
+        {%- endif %}
+        {%- if "tool_calls" in message %}
+            {#- We assume max 1 tool call per message, and so we infer the tool call name #}
+            {#- in "tool" messages from the most recent assistant tool call name #}
+            {%- set tool_call = message.tool_calls[0] %}
+            {%- if tool_call.function %}
+                {%- set tool_call = tool_call.function %}
+            {%- endif %}
+            {%- if message.content and message.thinking %}
+                {{- raise_exception("Cannot pass both content and thinking in an assistant message with tool calls! Put the analysis message in one or the other, but not both.") }}
+            {%- elif message.content %}
+                {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.content + "<|end|>" }}
+            {%- elif message.thinking %}
+                {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }}
+            {%- endif %}
+            {{- "<|start|>assistant to=" }}
+            {{- "functions." + tool_call.name + "<|channel|>commentary " }}
+            {{- (tool_call.content_type if tool_call.content_type is defined else "json") + "<|message|>" }}
+            {{- tool_call.arguments|tojson }}
+            {{- "<|call|>" }}
+            {%- set last_tool_call.name = tool_call.name %}
+        {%- elif loop.last and not add_generation_prompt %}
+            {#- Only render the CoT if the final turn is an assistant turn and add_generation_prompt is false #}
+            {#- This is a situation that should only occur in training, never in inference. #}
+            {%- if "thinking" in message %}
+                {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }}
+            {%- endif %}
+            {#- <|return|> indicates the end of generation, but <|end|> does not #}
+            {#- <|return|> should never be an input to the model, but we include it as the final token #}
+            {#- when training, so the model learns to emit it. #}
+            {{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|return|>" }}
+        {%- else %}
+            {#- CoT is dropped during all previous turns, so we never render it for inference #}
+            {{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|end|>" }}
+            {%- set last_tool_call.name = none %}
+        {%- endif %}
+    {%- elif message.role == 'tool' -%}
+        {%- if last_tool_call.name is none %}
+            {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+        {%- endif %}
+        {{- "<|start|>functions." + last_tool_call.name }}
+        {{- " to=assistant<|channel|>commentary<|message|>" + message.content|tojson + "<|end|>" }}
+    {%- elif message.role == 'user' -%}
+        {{- "<|start|>user<|message|>" + message.content + "<|end|>" }}
+    {%- endif -%}
+{%- endfor -%}
+
+{#- Generation prompt #}
+{%- if add_generation_prompt -%}
+<|start|>assistant
+{%- endif -%}
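
The template above enforces a specific message contract: chain-of-thought goes in a `thinking` field (rendered on the analysis channel), user-visible text goes in `content` (final channel), at most one tool call is assumed per assistant message, and every `tool` message must follow an assistant tool call. A sketch of a conversation shape that passes those checks (the `get_time` tool is hypothetical):

```python
# Hypothetical conversation accepted by the template's validation above.
messages = [
    {"role": "system", "content": "Always answer in French."},  # becomes the developer message
    {"role": "user", "content": "What time is it in Tokyo?"},
    {
        "role": "assistant",
        "thinking": "I should call the time tool for Asia/Tokyo.",  # analysis channel
        "tool_calls": [  # the template assumes at most one call per message
            {"function": {"name": "get_time", "arguments": {"tz": "Asia/Tokyo"}}}
        ],
    },
    {"role": "tool", "content": "09:41"},  # tool name inferred from the previous call
    {"role": "assistant", "content": "Il est 09 h 41 à Tokyo."},  # final channel
]
```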
config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "architectures": [
+    "FSDPGptOssForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "eos_token_id": 200002,
+  "experts_per_token": 4,
+  "head_dim": 64,
+  "hidden_act": "silu",
+  "hidden_size": 2880,
+  "initial_context_length": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 2880,
+  "layer_types": [
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 131072,
+  "model_type": "gpt_oss",
+  "num_attention_heads": 64,
+  "num_experts_per_tok": 4,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 8,
+  "num_local_experts": 32,
+  "output_router_logits": false,
+  "pad_token_id": 199999,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "beta_fast": 32.0,
+    "beta_slow": 1.0,
+    "factor": 32.0,
+    "original_max_position_embeddings": 4096,
+    "rope_type": "yarn",
+    "truncate": false
+  },
+  "rope_theta": 150000,
+  "router_aux_loss_coef": 0.9,
+  "sliding_window": 128,
+  "swiglu_limit": 7.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.55.0",
+  "use_cache": false,
+  "vocab_size": 201088
+}
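
Two details worth flagging in this config: the `architectures` entry is `FSDPGptOssForCausalLM` (presumably the FSDP-wrapped class the trainer saved) rather than the stock `GptOssForCausalLM`, and the 24 `layer_types` alternate sliding-window and full attention, with YaRN RoPE scaling extending 4096 original positions to 131072. Since `AutoConfig` dispatches on `model_type`, the file can still be inspected as usual; a minimal sketch (local path is an assumption):

```python
# Minimal config inspection sketch; the path is an assumption.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./outputs/gpt-oss-out-fft")
print(cfg.model_type)                 # "gpt_oss"
print(cfg.num_hidden_layers)          # 24
print(cfg.layer_types[:4])            # ['sliding_attention', 'full_attention', ...]
print(cfg.rope_scaling["rope_type"])  # "yarn"
print(cfg.max_position_embeddings)    # 131072
```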
generation_config.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "bos_token_id": 199998,
+  "do_sample": true,
+  "eos_token_id": [
+    200002,
+    199999
+  ],
+  "pad_token_id": 199999,
+  "transformers_version": "4.55.0"
+}
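
Generation stops on either of the two `eos_token_id` values. In the o200k_harmony vocabulary these ids should correspond to `<|return|>` (200002) and `<|endoftext|>` (199999, also used as pad), with 199998 as `<|startoftext|>`, which lines up with the chat template's comment that `<|return|>` marks the end of generation. A quick check (token strings are expectations to verify, path assumed):

```python
# Sketch: confirm what the special ids in generation_config.json decode to.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./outputs/gpt-oss-out-fft")  # assumed path
print(tok.convert_ids_to_tokens([199998, 199999, 200002]))
# expected: ['<|startoftext|>', '<|endoftext|>', '<|return|>']
```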
model-00001-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd1cbcd7a12fc1705eee8b57c62df3cb136f3bd82c7d4b3f631995bd8ffd1d6b
+size 4504304664
model-00002-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3147ff83ab2875421203f12749d7885b5f1fdf7f9da864ab0d245514a11ae539
+size 4939127656
model-00003-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4006e28feaf73bae0e096b16a1b6e05792916bf08adab07daa36876d12150524
+size 4939127656
model-00004-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c772833b8ea84d0a5c1eaa6fb072112f575474238d94e45980b9e2f0d0bf2a85
+size 4939127680
model-00005-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75fdaa1de84bb65a1b0a30a505eb6a0338b054cbcd4c2d2829e1bfcdb58cd0ea
+size 4939127704
model-00006-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff7ae3b6def2061f5e668ef34b4c1d294b06e7b7fe21b149d495302de26232e1
+size 4939127704
model-00007-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1afa34b54b9bf262db5d5a412550d76f152233389a537509e30db2a2e98372c3
+size 4939127704
model-00008-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b992cb64a5042b10247671e8b6b61547083e8e4fc4aa7e1198e7d4d4fa67cd
+size 4939127704
model-00009-of-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c44d0c2bf58c932a4e64d593ab87e3b68ecf71c853d3a9c3fd9a773847a75d69
+size 2751362856
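
The nine shard pointers above total about 41.83 GB, slightly more than the `total_size` recorded in the index below, which is consistent with each safetensors file carrying its own JSON header on top of the raw tensor bytes. A quick tally:

```python
# Sum the LFS pointer sizes of the nine shards above.
shard_sizes = [
    4504304664, 4939127656, 4939127656, 4939127680,
    4939127704, 4939127704, 4939127704, 4939127704,
    2751362856,
]
total = sum(shard_sizes)
print(total)                # 41829561328 bytes (~41.83 GB)
print(total - 41829514368)  # 46960 bytes above the index metadata's total_size
```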
model.safetensors.index.json
ADDED
@@ -0,0 +1,419 @@
+{
+  "metadata": {
+    "total_parameters": 20914757184,
+    "total_size": 41829514368
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00009-of-00009.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00009.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00009.safetensors",
+    "model.layers.0.mlp.experts.down_proj": "model-00001-of-00009.safetensors",
+    "model.layers.0.mlp.experts.down_proj_bias": "model-00001-of-00009.safetensors",
+    "model.layers.0.mlp.experts.gate_up_proj": "model-00001-of-00009.safetensors",
+    "model.layers.0.mlp.experts.gate_up_proj_bias": "model-00001-of-00009.safetensors",
+    "model.layers.0.mlp.router.bias": "model-00001-of-00009.safetensors",
+    "model.layers.0.mlp.router.weight": "model-00001-of-00009.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.o_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.sinks": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00009.safetensors",
+    "model.layers.1.mlp.experts.down_proj": "model-00001-of-00009.safetensors",
+    "model.layers.1.mlp.experts.down_proj_bias": "model-00001-of-00009.safetensors",
+    "model.layers.1.mlp.experts.gate_up_proj": "model-00001-of-00009.safetensors",
+    "model.layers.1.mlp.experts.gate_up_proj_bias": "model-00001-of-00009.safetensors",
+    "model.layers.1.mlp.router.bias": "model-00001-of-00009.safetensors",
+    "model.layers.1.mlp.router.weight": "model-00001-of-00009.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.o_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.sinks": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00004-of-00009.safetensors",
+    "model.layers.10.mlp.experts.down_proj": "model-00004-of-00009.safetensors",
+    "model.layers.10.mlp.experts.down_proj_bias": "model-00004-of-00009.safetensors",
+    "model.layers.10.mlp.experts.gate_up_proj": "model-00004-of-00009.safetensors",
+    "model.layers.10.mlp.experts.gate_up_proj_bias": "model-00004-of-00009.safetensors",
+    "model.layers.10.mlp.router.bias": "model-00004-of-00009.safetensors",
+    "model.layers.10.mlp.router.weight": "model-00004-of-00009.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.o_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.sinks": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00005-of-00009.safetensors",
+    "model.layers.11.mlp.experts.down_proj": "model-00005-of-00009.safetensors",
+    "model.layers.11.mlp.experts.down_proj_bias": "model-00005-of-00009.safetensors",
+    "model.layers.11.mlp.experts.gate_up_proj": "model-00005-of-00009.safetensors",
+    "model.layers.11.mlp.experts.gate_up_proj_bias": "model-00005-of-00009.safetensors",
+    "model.layers.11.mlp.router.bias": "model-00004-of-00009.safetensors",
+    "model.layers.11.mlp.router.weight": "model-00004-of-00009.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
+    "model.layers.11.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.o_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.sinks": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00005-of-00009.safetensors",
+    "model.layers.12.mlp.experts.down_proj": "model-00005-of-00009.safetensors",
+    "model.layers.12.mlp.experts.down_proj_bias": "model-00005-of-00009.safetensors",
+    "model.layers.12.mlp.experts.gate_up_proj": "model-00005-of-00009.safetensors",
+    "model.layers.12.mlp.experts.gate_up_proj_bias": "model-00005-of-00009.safetensors",
+    "model.layers.12.mlp.router.bias": "model-00005-of-00009.safetensors",
+    "model.layers.12.mlp.router.weight": "model-00005-of-00009.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.o_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.sinks": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00005-of-00009.safetensors",
+    "model.layers.13.mlp.experts.down_proj": "model-00005-of-00009.safetensors",
+    "model.layers.13.mlp.experts.down_proj_bias": "model-00005-of-00009.safetensors",
+    "model.layers.13.mlp.experts.gate_up_proj": "model-00005-of-00009.safetensors",
+    "model.layers.13.mlp.experts.gate_up_proj_bias": "model-00005-of-00009.safetensors",
+    "model.layers.13.mlp.router.bias": "model-00005-of-00009.safetensors",
+    "model.layers.13.mlp.router.weight": "model-00005-of-00009.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.o_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.sinks": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00006-of-00009.safetensors",
+    "model.layers.14.mlp.experts.down_proj": "model-00006-of-00009.safetensors",
+    "model.layers.14.mlp.experts.down_proj_bias": "model-00006-of-00009.safetensors",
+    "model.layers.14.mlp.experts.gate_up_proj": "model-00006-of-00009.safetensors",
+    "model.layers.14.mlp.experts.gate_up_proj_bias": "model-00006-of-00009.safetensors",
+    "model.layers.14.mlp.router.bias": "model-00005-of-00009.safetensors",
+    "model.layers.14.mlp.router.weight": "model-00005-of-00009.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
+    "model.layers.14.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.o_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.sinks": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00006-of-00009.safetensors",
+    "model.layers.15.mlp.experts.down_proj": "model-00006-of-00009.safetensors",
+    "model.layers.15.mlp.experts.down_proj_bias": "model-00006-of-00009.safetensors",
+    "model.layers.15.mlp.experts.gate_up_proj": "model-00006-of-00009.safetensors",
+    "model.layers.15.mlp.experts.gate_up_proj_bias": "model-00006-of-00009.safetensors",
+    "model.layers.15.mlp.router.bias": "model-00006-of-00009.safetensors",
+    "model.layers.15.mlp.router.weight": "model-00006-of-00009.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.o_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.sinks": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00006-of-00009.safetensors",
+    "model.layers.16.mlp.experts.down_proj": "model-00006-of-00009.safetensors",
+    "model.layers.16.mlp.experts.down_proj_bias": "model-00006-of-00009.safetensors",
+    "model.layers.16.mlp.experts.gate_up_proj": "model-00006-of-00009.safetensors",
+    "model.layers.16.mlp.experts.gate_up_proj_bias": "model-00006-of-00009.safetensors",
+    "model.layers.16.mlp.router.bias": "model-00006-of-00009.safetensors",
+    "model.layers.16.mlp.router.weight": "model-00006-of-00009.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.o_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.sinks": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00007-of-00009.safetensors",
+    "model.layers.17.mlp.experts.down_proj": "model-00007-of-00009.safetensors",
+    "model.layers.17.mlp.experts.down_proj_bias": "model-00007-of-00009.safetensors",
+    "model.layers.17.mlp.experts.gate_up_proj": "model-00007-of-00009.safetensors",
+    "model.layers.17.mlp.experts.gate_up_proj_bias": "model-00007-of-00009.safetensors",
+    "model.layers.17.mlp.router.bias": "model-00006-of-00009.safetensors",
+    "model.layers.17.mlp.router.weight": "model-00006-of-00009.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
+    "model.layers.17.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.o_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.sinks": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00007-of-00009.safetensors",
+    "model.layers.18.mlp.experts.down_proj": "model-00007-of-00009.safetensors",
+    "model.layers.18.mlp.experts.down_proj_bias": "model-00007-of-00009.safetensors",
+    "model.layers.18.mlp.experts.gate_up_proj": "model-00007-of-00009.safetensors",
+    "model.layers.18.mlp.experts.gate_up_proj_bias": "model-00007-of-00009.safetensors",
+    "model.layers.18.mlp.router.bias": "model-00007-of-00009.safetensors",
+    "model.layers.18.mlp.router.weight": "model-00007-of-00009.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.o_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.sinks": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00007-of-00009.safetensors",
+    "model.layers.19.mlp.experts.down_proj": "model-00007-of-00009.safetensors",
+    "model.layers.19.mlp.experts.down_proj_bias": "model-00007-of-00009.safetensors",
+    "model.layers.19.mlp.experts.gate_up_proj": "model-00007-of-00009.safetensors",
+    "model.layers.19.mlp.experts.gate_up_proj_bias": "model-00007-of-00009.safetensors",
+    "model.layers.19.mlp.router.bias": "model-00007-of-00009.safetensors",
+    "model.layers.19.mlp.router.weight": "model-00007-of-00009.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.o_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.sinks": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00002-of-00009.safetensors",
+    "model.layers.2.mlp.experts.down_proj": "model-00002-of-00009.safetensors",
+    "model.layers.2.mlp.experts.down_proj_bias": "model-00002-of-00009.safetensors",
+    "model.layers.2.mlp.experts.gate_up_proj": "model-00002-of-00009.safetensors",
+    "model.layers.2.mlp.experts.gate_up_proj_bias": "model-00002-of-00009.safetensors",
+    "model.layers.2.mlp.router.bias": "model-00001-of-00009.safetensors",
+    "model.layers.2.mlp.router.weight": "model-00001-of-00009.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
+    "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.o_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.sinks": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00008-of-00009.safetensors",
+    "model.layers.20.mlp.experts.down_proj": "model-00008-of-00009.safetensors",
+    "model.layers.20.mlp.experts.down_proj_bias": "model-00008-of-00009.safetensors",
+    "model.layers.20.mlp.experts.gate_up_proj": "model-00008-of-00009.safetensors",
+    "model.layers.20.mlp.experts.gate_up_proj_bias": "model-00008-of-00009.safetensors",
+    "model.layers.20.mlp.router.bias": "model-00007-of-00009.safetensors",
+    "model.layers.20.mlp.router.weight": "model-00007-of-00009.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
+    "model.layers.20.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.o_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.sinks": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00008-of-00009.safetensors",
+    "model.layers.21.mlp.experts.down_proj": "model-00008-of-00009.safetensors",
+    "model.layers.21.mlp.experts.down_proj_bias": "model-00008-of-00009.safetensors",
+    "model.layers.21.mlp.experts.gate_up_proj": "model-00008-of-00009.safetensors",
+    "model.layers.21.mlp.experts.gate_up_proj_bias": "model-00008-of-00009.safetensors",
+    "model.layers.21.mlp.router.bias": "model-00008-of-00009.safetensors",
+    "model.layers.21.mlp.router.weight": "model-00008-of-00009.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.o_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.sinks": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00008-of-00009.safetensors",
+    "model.layers.22.mlp.experts.down_proj": "model-00008-of-00009.safetensors",
+    "model.layers.22.mlp.experts.down_proj_bias": "model-00008-of-00009.safetensors",
+    "model.layers.22.mlp.experts.gate_up_proj": "model-00008-of-00009.safetensors",
+    "model.layers.22.mlp.experts.gate_up_proj_bias": "model-00008-of-00009.safetensors",
+    "model.layers.22.mlp.router.bias": "model-00008-of-00009.safetensors",
+    "model.layers.22.mlp.router.weight": "model-00008-of-00009.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.o_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.sinks": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00009-of-00009.safetensors",
+    "model.layers.23.mlp.experts.down_proj": "model-00009-of-00009.safetensors",
+    "model.layers.23.mlp.experts.down_proj_bias": "model-00009-of-00009.safetensors",
+    "model.layers.23.mlp.experts.gate_up_proj": "model-00009-of-00009.safetensors",
+    "model.layers.23.mlp.experts.gate_up_proj_bias": "model-00009-of-00009.safetensors",
+    "model.layers.23.mlp.router.bias": "model-00008-of-00009.safetensors",
+    "model.layers.23.mlp.router.weight": "model-00008-of-00009.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
+    "model.layers.23.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.o_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.sinks": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00002-of-00009.safetensors",
+    "model.layers.3.mlp.experts.down_proj": "model-00002-of-00009.safetensors",
+    "model.layers.3.mlp.experts.down_proj_bias": "model-00002-of-00009.safetensors",
+    "model.layers.3.mlp.experts.gate_up_proj": "model-00002-of-00009.safetensors",
+    "model.layers.3.mlp.experts.gate_up_proj_bias": "model-00002-of-00009.safetensors",
+    "model.layers.3.mlp.router.bias": "model-00002-of-00009.safetensors",
+    "model.layers.3.mlp.router.weight": "model-00002-of-00009.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.o_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.sinks": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00002-of-00009.safetensors",
+    "model.layers.4.mlp.experts.down_proj": "model-00002-of-00009.safetensors",
+    "model.layers.4.mlp.experts.down_proj_bias": "model-00002-of-00009.safetensors",
+    "model.layers.4.mlp.experts.gate_up_proj": "model-00002-of-00009.safetensors",
+    "model.layers.4.mlp.experts.gate_up_proj_bias": "model-00002-of-00009.safetensors",
+    "model.layers.4.mlp.router.bias": "model-00002-of-00009.safetensors",
+    "model.layers.4.mlp.router.weight": "model-00002-of-00009.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.o_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.sinks": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00003-of-00009.safetensors",
+    "model.layers.5.mlp.experts.down_proj": "model-00003-of-00009.safetensors",
+    "model.layers.5.mlp.experts.down_proj_bias": "model-00003-of-00009.safetensors",
+    "model.layers.5.mlp.experts.gate_up_proj": "model-00003-of-00009.safetensors",
+    "model.layers.5.mlp.experts.gate_up_proj_bias": "model-00003-of-00009.safetensors",
+    "model.layers.5.mlp.router.bias": "model-00002-of-00009.safetensors",
+    "model.layers.5.mlp.router.weight": "model-00002-of-00009.safetensors",
|
339 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
|
340 |
+
"model.layers.5.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
|
341 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
|
342 |
+
"model.layers.5.self_attn.o_proj.bias": "model-00002-of-00009.safetensors",
|
343 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
|
344 |
+
"model.layers.5.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
|
345 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
|
346 |
+
"model.layers.5.self_attn.sinks": "model-00002-of-00009.safetensors",
|
347 |
+
"model.layers.5.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
|
348 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
|
349 |
+
"model.layers.6.input_layernorm.weight": "model-00003-of-00009.safetensors",
|
350 |
+
"model.layers.6.mlp.experts.down_proj": "model-00003-of-00009.safetensors",
|
351 |
+
"model.layers.6.mlp.experts.down_proj_bias": "model-00003-of-00009.safetensors",
|
352 |
+
"model.layers.6.mlp.experts.gate_up_proj": "model-00003-of-00009.safetensors",
|
353 |
+
"model.layers.6.mlp.experts.gate_up_proj_bias": "model-00003-of-00009.safetensors",
|
354 |
+
"model.layers.6.mlp.router.bias": "model-00003-of-00009.safetensors",
|
355 |
+
"model.layers.6.mlp.router.weight": "model-00003-of-00009.safetensors",
|
356 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
|
357 |
+
"model.layers.6.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
|
358 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
|
359 |
+
"model.layers.6.self_attn.o_proj.bias": "model-00003-of-00009.safetensors",
|
360 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
|
361 |
+
"model.layers.6.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
|
362 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
|
363 |
+
"model.layers.6.self_attn.sinks": "model-00003-of-00009.safetensors",
|
364 |
+
"model.layers.6.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
|
365 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
|
366 |
+
"model.layers.7.input_layernorm.weight": "model-00003-of-00009.safetensors",
|
367 |
+
"model.layers.7.mlp.experts.down_proj": "model-00003-of-00009.safetensors",
|
368 |
+
"model.layers.7.mlp.experts.down_proj_bias": "model-00003-of-00009.safetensors",
|
369 |
+
"model.layers.7.mlp.experts.gate_up_proj": "model-00003-of-00009.safetensors",
|
370 |
+
"model.layers.7.mlp.experts.gate_up_proj_bias": "model-00003-of-00009.safetensors",
|
371 |
+
"model.layers.7.mlp.router.bias": "model-00003-of-00009.safetensors",
|
372 |
+
"model.layers.7.mlp.router.weight": "model-00003-of-00009.safetensors",
|
373 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
|
374 |
+
"model.layers.7.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
|
375 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
|
376 |
+
"model.layers.7.self_attn.o_proj.bias": "model-00003-of-00009.safetensors",
|
377 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
|
378 |
+
"model.layers.7.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
|
379 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
|
380 |
+
"model.layers.7.self_attn.sinks": "model-00003-of-00009.safetensors",
|
381 |
+
"model.layers.7.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
|
382 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
|
383 |
+
"model.layers.8.input_layernorm.weight": "model-00004-of-00009.safetensors",
|
384 |
+
"model.layers.8.mlp.experts.down_proj": "model-00004-of-00009.safetensors",
|
385 |
+
"model.layers.8.mlp.experts.down_proj_bias": "model-00004-of-00009.safetensors",
|
386 |
+
"model.layers.8.mlp.experts.gate_up_proj": "model-00004-of-00009.safetensors",
|
387 |
+
"model.layers.8.mlp.experts.gate_up_proj_bias": "model-00004-of-00009.safetensors",
|
388 |
+
"model.layers.8.mlp.router.bias": "model-00003-of-00009.safetensors",
|
389 |
+
"model.layers.8.mlp.router.weight": "model-00003-of-00009.safetensors",
|
390 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
|
391 |
+
"model.layers.8.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
|
392 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
|
393 |
+
"model.layers.8.self_attn.o_proj.bias": "model-00003-of-00009.safetensors",
|
394 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
|
395 |
+
"model.layers.8.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
|
396 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
|
397 |
+
"model.layers.8.self_attn.sinks": "model-00003-of-00009.safetensors",
|
398 |
+
"model.layers.8.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
|
399 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
|
400 |
+
"model.layers.9.input_layernorm.weight": "model-00004-of-00009.safetensors",
|
401 |
+
"model.layers.9.mlp.experts.down_proj": "model-00004-of-00009.safetensors",
|
402 |
+
"model.layers.9.mlp.experts.down_proj_bias": "model-00004-of-00009.safetensors",
|
403 |
+
"model.layers.9.mlp.experts.gate_up_proj": "model-00004-of-00009.safetensors",
|
404 |
+
"model.layers.9.mlp.experts.gate_up_proj_bias": "model-00004-of-00009.safetensors",
|
405 |
+
"model.layers.9.mlp.router.bias": "model-00004-of-00009.safetensors",
|
406 |
+
"model.layers.9.mlp.router.weight": "model-00004-of-00009.safetensors",
|
407 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
|
408 |
+
"model.layers.9.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
|
409 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
|
410 |
+
"model.layers.9.self_attn.o_proj.bias": "model-00004-of-00009.safetensors",
|
411 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
|
412 |
+
"model.layers.9.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
|
413 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
|
414 |
+
"model.layers.9.self_attn.sinks": "model-00004-of-00009.safetensors",
|
415 |
+
"model.layers.9.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
|
416 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
|
417 |
+
"model.norm.weight": "model-00009-of-00009.safetensors"
|
418 |
+
}
|
419 |
+
}
|
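For context (not part of the uploaded files): the `weight_map` above lets loaders open only the shard that holds a given tensor instead of reading all nine files. A minimal sketch, assuming a local snapshot of this repo in a hypothetical `gpt-oss-out-fft/` directory:

```python
import json
from safetensors import safe_open

# Hypothetical local snapshot directory for this upload.
CKPT_DIR = "gpt-oss-out-fft"

# The index maps each tensor name to the shard file that stores it.
with open(f"{CKPT_DIR}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.23.mlp.experts.down_proj"   # any key from weight_map above
shard = index["weight_map"][name]                # -> "model-00009-of-00009.safetensors"

# safetensors reads lazily, so only this one tensor is pulled from disk.
with safe_open(f"{CKPT_DIR}/{shard}", framework="pt") as st:
    tensor = st.get_tensor(name)
print(name, tuple(tensor.shape), tensor.dtype)
```

`AutoModelForCausalLM.from_pretrained` performs the same resolution internally, so the index rarely needs to be read by hand.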
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|return|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
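A quick way to confirm these mappings take effect, sketched under the assumption that the checkpoint is loadable with `AutoTokenizer` (the path below is a placeholder):

```python
from transformers import AutoTokenizer

# Placeholder path; substitute the actual repo id or local checkout.
tok = AutoTokenizer.from_pretrained("gpt-oss-out-fft")

# These values come straight from special_tokens_map.json above.
print(tok.bos_token)  # <|startoftext|>
print(tok.eos_token)  # <|return|>
print(tok.pad_token)  # <|endoftext|>
```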
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0614fe83cadab421296e664e1f48f4261fa8fef6e03e63bb75c20f38e37d07d3
size 27868174
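`tokenizer.json` is stored as a Git LFS pointer: the `oid` is the SHA-256 of the real 27,868,174-byte file. A small sketch for verifying a downloaded copy against the pointer (the local path is an assumption):

```python
import hashlib
import os

# Values copied from the LFS pointer above.
EXPECTED_OID = "0614fe83cadab421296e664e1f48f4261fa8fef6e03e63bb75c20f38e37d07d3"
EXPECTED_SIZE = 27868174

path = "tokenizer.json"  # hypothetical local download
assert os.path.getsize(path) == EXPECTED_SIZE

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID
print("tokenizer.json matches the LFS pointer")
```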
tokenizer_config.json
ADDED
@@ -0,0 +1,183 @@
{
  "added_tokens_decoder": {
    "199998": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "199999": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200000": {
      "content": "<|reserved_200000|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200001": {
      "content": "<|reserved_200001|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200002": {
      "content": "<|return|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200003": {
      "content": "<|constrain|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200004": {
      "content": "<|reserved_200004|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200005": {
      "content": "<|channel|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200006": {
      "content": "<|start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200007": {
      "content": "<|end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200008": {
      "content": "<|message|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200009": {
      "content": "<|reserved_200009|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200010": {
      "content": "<|reserved_200010|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200011": {
      "content": "<|reserved_200011|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200012": {
      "content": "<|call|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200013": {
      "content": "<|reserved_200013|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200014": {
      "content": "<|reserved_200014|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200015": {
      "content": "<|reserved_200015|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200016": {
      "content": "<|reserved_200016|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200017": {
      "content": "<|reserved_200017|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200018": {
      "content": "<|endofprompt|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|startoftext|>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|return|>",
  "extra_special_tokens": {},
  "model_input_names": [
    "input_ids",
    "attention_mask"
  ],
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "PreTrainedTokenizerFast"
}
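The `added_tokens_decoder` block pins ids 199998-200018 to the control tokens the chat template relies on. A minimal round-trip check, again assuming a loadable local copy at a placeholder path:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt-oss-out-fft")  # placeholder path

# Ids registered in added_tokens_decoder above map back to their contents.
for tid in (199998, 200002, 200005, 200006, 200007, 200008, 200012):
    print(tid, tok.convert_ids_to_tokens(tid))

# Added special tokens are matched verbatim (normalized=false), so a
# template-style string round-trips through encode/decode unchanged.
ids = tok.encode("<|start|>assistant<|message|>hi<|end|>", add_special_tokens=False)
print(ids)
print(tok.decode(ids))
```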
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fa4b843cb6bcb99be8589958426e121040c1346b3a28c5c5062def1beab2d33
size 8723
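`training_args.bin` follows the usual `transformers.Trainer` convention of pickling the `TrainingArguments` object with `torch.save`. A hedged sketch for inspecting it, assuming that convention holds here:

```python
import torch

# training_args.bin pickles a TrainingArguments instance, so unpickling
# needs weights_only=False and a compatible transformers version installed.
args = torch.load("training_args.bin", weights_only=False)  # placeholder path
print(type(args).__name__)  # e.g. TrainingArguments
print(args.learning_rate, args.per_device_train_batch_size)
```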