atsuki-yamaguchi committed
Commit acfc6fc · verified · 1 Parent(s): fa70d97

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,36 +1,21 @@
 ---
-license: mit
-language:
-- sw
+library_name: peft
 ---
-TigerBot-7B LAPT + Heuristics Swahili
-===
+## Training procedure
 
-## How to use
-```python
-from peft import AutoPeftModelForCausalLM
-from transformers import AutoTokenizer
 
-model = AutoPeftModelForCausalLM.from_pretrained(
-    "atsuki-yamaguchi/tigerbot-7b-base-heuristics-sw"
-)
-tokenizer = AutoTokenizer.from_pretrained(
-    "atsuki-yamaguchi/tigerbot-7b-base-heuristics-sw"
-)
-```
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+### Framework versions
 
-## Citation
-```
-@article{yamaguchi2024empirical,
-  title={An Empirical Study on Cross-lingual Vocabulary Adaptation for Efficient Generative {LLM} Inference},
-  author={Atsuki Yamaguchi and Aline Villavicencio and Nikolaos Aletras},
-  journal={ArXiv},
-  year={2024},
-  volume={abs/2402.10712},
-  url={https://arxiv.org/abs/2402.10712}
-}
-```
-
-## Link
-For more details, please visit https://github.com/gucci-j/llm-cva
 
+- PEFT 0.5.0
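
The commit drops the old usage snippet from the README. For reference, here is a minimal sketch of loading this adapter under the 8-bit `bitsandbytes` setup the new README lists; the repo id comes from the removed "How to use" section, and passing a `quantization_config` through `AutoPeftModelForCausalLM` is an assumption, not something this commit documents:

```python
# Hedged sketch: load the LoRA adapter with the 8-bit bitsandbytes settings
# listed in the updated README. PEFT forwards extra kwargs to the base
# model's from_pretrained, so the quantization config reaches transformers.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,       # load_in_8bit: True
    llm_int8_threshold=6.0,  # llm_int8_threshold: 6.0
)

model = AutoPeftModelForCausalLM.from_pretrained(
    "atsuki-yamaguchi/tigerbot-7b-base-heuristics-sw",
    quantization_config=bnb_config,
)
tokenizer = AutoTokenizer.from_pretrained(
    "atsuki-yamaguchi/tigerbot-7b-base-heuristics-sw"
)
```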
adapter_config.json CHANGED
@@ -1,29 +1 @@
-{
-  "auto_mapping": null,
-  "base_model_name_or_path": "atsuki-yamaguchi/tigerbot-7b-base-heuristics-sw",
-  "bias": "none",
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "lora_alpha": 32,
-  "lora_dropout": 0.05,
-  "modules_to_save": [
-    "lm_head",
-    "embed_tokens"
-  ],
-  "peft_type": "LORA",
-  "r": 8,
-  "revision": null,
-  "target_modules": [
-    "q_proj",
-    "v_proj",
-    "k_proj",
-    "o_proj",
-    "gate_proj",
-    "down_proj",
-    "up_proj"
-  ],
-  "task_type": "CAUSAL_LM"
-}
+{"auto_mapping": null, "base_model_name_or_path": "atsuki-yamaguchi/tigerbot-7b-base-heuristics-sw", "bias": "none", "fan_in_fan_out": false, "inference_mode": true, "init_lora_weights": true, "layers_pattern": null, "layers_to_transform": null, "lora_alpha": 32, "lora_dropout": 0.05, "modules_to_save": ["lm_head", "embed_tokens"], "peft_type": "LORA", "r": 8, "revision": null, "target_modules": ["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "down_proj", "up_proj"], "task_type": "CAUSAL_LM"}
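
The compacted adapter_config.json above maps one-to-one onto a PEFT `LoraConfig`. A hedged reconstruction, with field values copied verbatim from the diff (illustrative only; the checkpoint ships its own config):

```python
# Hedged sketch: the LoraConfig equivalent of the adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "v_proj", "k_proj", "o_proj",
        "gate_proj", "down_proj", "up_proj",
    ],
    # lm_head and embed_tokens are trained in full rather than LoRA-wrapped,
    # consistent with the LAPT vocabulary-adaptation setup named in the old README.
    modules_to_save=["lm_head", "embed_tokens"],
)
```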
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/mnt/parscratch/users/acp23ay/private/models/tigerbot-7b-base-sw-heuristics",
+  "_name_or_path": "TigerResearch/tigerbot-7b-base",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -21,7 +21,7 @@
   "rope_scaling": null,
   "rope_theta": 10000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "float32",
+  "torch_dtype": "float64",
   "transformers_version": "4.35.0.dev0",
   "use_cache": true,
   "vocab_size": 50264
model-00001-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d2a2ffad4267c12c8d32c5b4cb643c8e6a57b2b79f2065acc7a1d3b571226935
-size 4938306856
+oid sha256:532be22851bb77b3d0dcf25562f0646535ff14737af6538cf659da2b78cdabcb
+size 4952297760
model-00002-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c0f990f310101840de528cc3529a2dfb820d8cf33ba93c3e24ffd75206d7533
-size 4991424816
+oid sha256:a9c37a245ee5ad4472f4260ab2adc127ec752a1ef2ffe0a61148affecc49caaf
+size 4991424808
model-00003-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:636f81aa82b462ecc0c32ca267e10bbd405f978003f00255317c4e416a90a263
+oid sha256:8348f7bd98b1fc9689bb7b369d2ca4207d31f52c2743fdd218f9993d4eb0c613
 size 4924315880
model-00004-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e6e5060c0336d0f5ec66cc007a3428542f6946f32027a15b888844006b4c4158
+oid sha256:18f0fdc5a923b1f8ea222154381f364870ab577211eb72fc909d5e23c054b108
 size 4857206904
model-00005-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48de40589456afa1eb370c85d74d127f9526ab536facc7e03a18805607113fcc
+oid sha256:6af6f4bc0f17b80add0bd36b79310b37014cad2539a442ed43c7ca140f67eb77
 size 4857206904
model-00006-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e28b2e0f832c68d560ca60e48f537fef849bc80a4cd9ef7a940c0a3355d2abb0
-size 2983709496
+oid sha256:be87d1220ad8ec0e251cbe952af8b735b007bab21c646732421e6687c3a8ac8b
+size 3793243976
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 27552137216
+    "total_size": 28375662592
   },
   "weight_map": {
     "lm_head.weight": "model-00006-of-00006.safetensors",
@@ -23,13 +23,13 @@
     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
-    "model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
-    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
-    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
-    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
-    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00003-of-00006.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
-    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.11.input_layernorm.weight": "model-00003-of-00006.safetensors",
@@ -37,10 +37,10 @@
     "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.11.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
-    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
-    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.12.input_layernorm.weight": "model-00003-of-00006.safetensors",
     "model.layers.12.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
@@ -77,11 +77,11 @@
     "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
-    "model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.16.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00004-of-00006.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
     "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
     "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
@@ -91,10 +91,10 @@
     "model.layers.17.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.17.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.17.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
-    "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
-    "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.18.input_layernorm.weight": "model-00004-of-00006.safetensors",
     "model.layers.18.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.18.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
@@ -140,11 +140,11 @@
     "model.layers.21.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.21.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.21.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
-    "model.layers.22.input_layernorm.weight": "model-00004-of-00006.safetensors",
-    "model.layers.22.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
-    "model.layers.22.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
-    "model.layers.22.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
-    "model.layers.22.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00005-of-00006.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
     "model.layers.22.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.22.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
     "model.layers.22.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
@@ -154,10 +154,10 @@
     "model.layers.23.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.23.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.23.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
-    "model.layers.23.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
-    "model.layers.23.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
-    "model.layers.23.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
-    "model.layers.23.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.24.input_layernorm.weight": "model-00005-of-00006.safetensors",
     "model.layers.24.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.24.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
@@ -194,11 +194,11 @@
     "model.layers.27.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.27.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.27.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
-    "model.layers.28.input_layernorm.weight": "model-00005-of-00006.safetensors",
-    "model.layers.28.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
-    "model.layers.28.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
-    "model.layers.28.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
-    "model.layers.28.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+    "model.layers.28.input_layernorm.weight": "model-00006-of-00006.safetensors",
+    "model.layers.28.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+    "model.layers.28.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+    "model.layers.28.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+    "model.layers.28.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
     "model.layers.28.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.28.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
     "model.layers.28.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
@@ -208,10 +208,10 @@
     "model.layers.29.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
     "model.layers.29.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
     "model.layers.29.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
-    "model.layers.29.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
-    "model.layers.29.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
-    "model.layers.29.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
-    "model.layers.29.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+    "model.layers.29.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+    "model.layers.29.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+    "model.layers.29.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+    "model.layers.29.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
     "model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
@@ -239,15 +239,15 @@
     "model.layers.31.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
     "model.layers.31.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
     "model.layers.31.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
-    "model.layers.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
-    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
-    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
-    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
-    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
-    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
-    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00002-of-00006.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
-    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.5.input_layernorm.weight": "model-00002-of-00006.safetensors",
     "model.layers.5.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
@@ -255,7 +255,7 @@
     "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
     "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
-    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
     "model.layers.6.input_layernorm.weight": "model-00002-of-00006.safetensors",
     "model.layers.6.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af0ff80a7465b87b4c4dca60afbf1360d89ee87bb0c56e18b7b6499655878709
+size 865706972
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a8f3b29e0bd2cc11dc0bd5c8d6115a7d91eafaa4a330189e237f189325c2c38
+size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d403cd2fa0f47b387bc486ed7a0a0523b4d4d12118efa47d66edfd3fa514ef0
+size 1064
special_tokens_map.json CHANGED
@@ -1,23 +1,5 @@
 {
-  "bos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
 }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b244ab5c166e93da6b6a49e8c27efbe55a805da74152d96651e3f7f526154e26
+size 4664