FlameF0X committed
Commit 60e7410 · verified · 1 Parent(s): 005e5f1

Upload 13 files

config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 960,
+   "initializer_range": 0.02,
+   "intermediate_size": 2560,
+   "is_llama_config": true,
+   "max_position_embeddings": 8192,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 15,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 5,
+   "pad_token_id": 2,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_interleaved": false,
+   "rope_scaling": null,
+   "rope_theta": 100000,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers.js_config": {
+     "kv_cache_dtype": {
+       "fp16": "float16",
+       "q4f16": "float16"
+     }
+   },
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 49152
+ }
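The config above describes a compact Llama-style causal LM: 32 hidden layers, hidden size 960, 15 attention heads over 5 key/value heads (grouped-query attention), an 8192-token context and a 49152-entry vocabulary. A minimal sketch of loading the checkpoint with transformers; the local directory name is an assumption, not part of this commit:

```python
# Minimal sketch, assuming the files from this commit sit in ./checkpoint (hypothetical path).
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

checkpoint_dir = "./checkpoint"

config = AutoConfig.from_pretrained(checkpoint_dir)
print(config.model_type)           # "llama"
print(config.num_hidden_layers)    # 32
print(config.num_key_value_heads)  # 5 -> grouped-query attention

model = AutoModelForCausalLM.from_pretrained(checkpoint_dir)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
print(model.num_parameters())
```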
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 2,
+   "transformers_version": "4.51.3"
+ }
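generation_config.json holds the defaults that `model.generate()` picks up at inference time (bos 1, eos 2, pad 2). A hedged sketch, reusing the same hypothetical checkpoint directory:

```python
# Sketch only; "./checkpoint" is a hypothetical local path for this commit's files.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

checkpoint_dir = "./checkpoint"
gen_config = GenerationConfig.from_pretrained(checkpoint_dir)
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 1 2 2

tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
model = AutoModelForCausalLM.from_pretrained(checkpoint_dir)

inputs = tokenizer("Hello, world", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=32)  # stops on eos_token_id=2
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```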
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b104ce0c1a82268048b4e6115987dac3620ccf84d5fa75b02524e92467008057
+ size 1447317080
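model.safetensors (like optimizer.pt, rng_state.pth, scheduler.pt and training_args.bin below) is stored as a Git LFS pointer: spec version, sha256 object id, and byte size. A small standard-library sketch for verifying a downloaded copy against this pointer:

```python
# Sketch: check a downloaded model.safetensors against the LFS pointer above.
import hashlib
import os

path = "model.safetensors"
expected_oid = "b104ce0c1a82268048b4e6115987dac3620ccf84d5fa75b02524e92467008057"
expected_size = 1447317080  # bytes, from the pointer

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("file matches its LFS pointer")
```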
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e97708c6473aedd286676ef1f83c09b4aef122616292c13805df4fe5b34d5e93
+ size 2894813242
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d9cd6a0487226e5bd30d1846894c82af483733ab4381b75bae9c0745e05d405
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d12cf3684ad8e007d287c4e922d4fc70b61c25a000341ea727fc57200243819b
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": {
+     "content": "<|im_start|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
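special_tokens_map.json pins the ChatML control tokens: `<|im_start|>` as bos, `<|im_end|>` as eos and pad, `<|endoftext|>` as unk. A quick hedged check with the tokenizer from this commit (hypothetical local path; ids follow the added_tokens_decoder in tokenizer_config.json):

```python
# Sketch; "./checkpoint" is a hypothetical local path for this commit's files.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint")
print(tok.bos_token, tok.bos_token_id)  # <|im_start|> 1
print(tok.eos_token, tok.eos_token_id)  # <|im_end|> 2
print(tok.pad_token, tok.pad_token_id)  # <|im_end|> 2
print(tok.unk_token, tok.unk_token_id)  # <|endoftext|> 0
```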
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,155 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<repo_name>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "12": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<jupyter_script>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "extra_special_tokens": {},
+   "model_max_length": 8192,
+   "pad_token": "<|im_end|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>",
+   "vocab_size": 49152
+ }
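tokenizer_config.json also ships a ChatML-style `chat_template` that injects the default SmolLM system prompt when no system message is supplied. A hedged sketch of rendering it via `apply_chat_template`:

```python
# Sketch; "./checkpoint" is a hypothetical local path for this commit's files.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")
messages = [{"role": "user", "content": "Write a haiku about rain."}]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # appends "<|im_start|>assistant\n"
)
print(prompt)
# With no system message, the template prepends:
# "<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>"
```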
trainer_state.json ADDED
@@ -0,0 +1,363 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 472,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0211864406779661,
+       "grad_norm": 4.637205123901367,
+       "learning_rate": 4.904661016949153e-05,
+       "loss": 3.0019,
+       "step": 10
+     },
+     {
+       "epoch": 0.0423728813559322,
+       "grad_norm": 4.062796115875244,
+       "learning_rate": 4.7987288135593225e-05,
+       "loss": 2.6502,
+       "step": 20
+     },
+     {
+       "epoch": 0.0635593220338983,
+       "grad_norm": 3.6472411155700684,
+       "learning_rate": 4.692796610169492e-05,
+       "loss": 2.4454,
+       "step": 30
+     },
+     {
+       "epoch": 0.0847457627118644,
+       "grad_norm": 3.438002347946167,
+       "learning_rate": 4.5868644067796616e-05,
+       "loss": 2.2664,
+       "step": 40
+     },
+     {
+       "epoch": 0.1059322033898305,
+       "grad_norm": 3.674481153488159,
+       "learning_rate": 4.480932203389831e-05,
+       "loss": 2.3392,
+       "step": 50
+     },
+     {
+       "epoch": 0.1271186440677966,
+       "grad_norm": 4.97629976272583,
+       "learning_rate": 4.375e-05,
+       "loss": 2.192,
+       "step": 60
+     },
+     {
+       "epoch": 0.1483050847457627,
+       "grad_norm": 3.4427733421325684,
+       "learning_rate": 4.2690677966101695e-05,
+       "loss": 2.1182,
+       "step": 70
+     },
+     {
+       "epoch": 0.1694915254237288,
+       "grad_norm": 3.9113576412200928,
+       "learning_rate": 4.163135593220339e-05,
+       "loss": 2.1282,
+       "step": 80
+     },
+     {
+       "epoch": 0.1906779661016949,
+       "grad_norm": 3.6016507148742676,
+       "learning_rate": 4.0572033898305086e-05,
+       "loss": 2.1691,
+       "step": 90
+     },
+     {
+       "epoch": 0.211864406779661,
+       "grad_norm": 3.363358974456787,
+       "learning_rate": 3.951271186440678e-05,
+       "loss": 2.0797,
+       "step": 100
+     },
+     {
+       "epoch": 0.2330508474576271,
+       "grad_norm": 3.2457995414733887,
+       "learning_rate": 3.8453389830508476e-05,
+       "loss": 2.219,
+       "step": 110
+     },
+     {
+       "epoch": 0.2542372881355932,
+       "grad_norm": 3.339585065841675,
+       "learning_rate": 3.739406779661017e-05,
+       "loss": 2.1114,
+       "step": 120
+     },
+     {
+       "epoch": 0.2754237288135593,
+       "grad_norm": 3.4949917793273926,
+       "learning_rate": 3.633474576271187e-05,
+       "loss": 1.966,
+       "step": 130
+     },
+     {
+       "epoch": 0.2966101694915254,
+       "grad_norm": 3.223611354827881,
+       "learning_rate": 3.527542372881356e-05,
+       "loss": 2.1414,
+       "step": 140
+     },
+     {
+       "epoch": 0.3177966101694915,
+       "grad_norm": 3.4801230430603027,
+       "learning_rate": 3.421610169491525e-05,
+       "loss": 2.0981,
+       "step": 150
+     },
+     {
+       "epoch": 0.3389830508474576,
+       "grad_norm": 3.30033540725708,
+       "learning_rate": 3.315677966101695e-05,
+       "loss": 1.9982,
+       "step": 160
+     },
+     {
+       "epoch": 0.3601694915254237,
+       "grad_norm": 3.305997610092163,
+       "learning_rate": 3.209745762711864e-05,
+       "loss": 1.9845,
+       "step": 170
+     },
+     {
+       "epoch": 0.3813559322033898,
+       "grad_norm": 3.4289534091949463,
+       "learning_rate": 3.1038135593220344e-05,
+       "loss": 2.0987,
+       "step": 180
+     },
+     {
+       "epoch": 0.4025423728813559,
+       "grad_norm": 3.018153190612793,
+       "learning_rate": 2.9978813559322032e-05,
+       "loss": 2.0208,
+       "step": 190
+     },
+     {
+       "epoch": 0.423728813559322,
+       "grad_norm": 3.609093427658081,
+       "learning_rate": 2.891949152542373e-05,
+       "loss": 1.996,
+       "step": 200
+     },
+     {
+       "epoch": 0.4449152542372881,
+       "grad_norm": 4.0159220695495605,
+       "learning_rate": 2.7860169491525423e-05,
+       "loss": 1.9612,
+       "step": 210
+     },
+     {
+       "epoch": 0.4661016949152542,
+       "grad_norm": 3.264458179473877,
+       "learning_rate": 2.6800847457627122e-05,
+       "loss": 2.0203,
+       "step": 220
+     },
+     {
+       "epoch": 0.4872881355932203,
+       "grad_norm": 3.696259021759033,
+       "learning_rate": 2.5741525423728814e-05,
+       "loss": 2.0867,
+       "step": 230
+     },
+     {
+       "epoch": 0.5084745762711864,
+       "grad_norm": 4.3933796882629395,
+       "learning_rate": 2.468220338983051e-05,
+       "loss": 1.9675,
+       "step": 240
+     },
+     {
+       "epoch": 0.5296610169491526,
+       "grad_norm": 3.876000165939331,
+       "learning_rate": 2.3622881355932204e-05,
+       "loss": 2.0325,
+       "step": 250
+     },
+     {
+       "epoch": 0.5508474576271186,
+       "grad_norm": 3.8333613872528076,
+       "learning_rate": 2.25635593220339e-05,
+       "loss": 1.9674,
+       "step": 260
+     },
+     {
+       "epoch": 0.5720338983050848,
+       "grad_norm": 3.398927688598633,
+       "learning_rate": 2.1504237288135595e-05,
+       "loss": 1.9873,
+       "step": 270
+     },
+     {
+       "epoch": 0.5932203389830508,
+       "grad_norm": 2.980912446975708,
+       "learning_rate": 2.044491525423729e-05,
+       "loss": 1.9447,
+       "step": 280
+     },
+     {
+       "epoch": 0.614406779661017,
+       "grad_norm": 4.0269246101379395,
+       "learning_rate": 1.9385593220338986e-05,
+       "loss": 1.9553,
+       "step": 290
+     },
+     {
+       "epoch": 0.635593220338983,
+       "grad_norm": 3.15983247756958,
+       "learning_rate": 1.832627118644068e-05,
+       "loss": 1.9081,
+       "step": 300
+     },
+     {
+       "epoch": 0.6567796610169492,
+       "grad_norm": 4.2125935554504395,
+       "learning_rate": 1.7266949152542373e-05,
+       "loss": 1.9373,
+       "step": 310
+     },
+     {
+       "epoch": 0.6779661016949152,
+       "grad_norm": 3.3405373096466064,
+       "learning_rate": 1.620762711864407e-05,
+       "loss": 1.8637,
+       "step": 320
+     },
+     {
+       "epoch": 0.6991525423728814,
+       "grad_norm": 3.782801628112793,
+       "learning_rate": 1.5148305084745764e-05,
+       "loss": 2.029,
+       "step": 330
+     },
+     {
+       "epoch": 0.7203389830508474,
+       "grad_norm": 3.6767022609710693,
+       "learning_rate": 1.408898305084746e-05,
+       "loss": 1.7768,
+       "step": 340
+     },
+     {
+       "epoch": 0.7415254237288136,
+       "grad_norm": 3.3231544494628906,
+       "learning_rate": 1.3029661016949155e-05,
+       "loss": 1.9421,
+       "step": 350
+     },
+     {
+       "epoch": 0.7627118644067796,
+       "grad_norm": 3.579037666320801,
+       "learning_rate": 1.1970338983050848e-05,
+       "loss": 1.8764,
+       "step": 360
+     },
+     {
+       "epoch": 0.7838983050847458,
+       "grad_norm": 3.5025410652160645,
+       "learning_rate": 1.0911016949152544e-05,
+       "loss": 1.8414,
+       "step": 370
+     },
+     {
+       "epoch": 0.8050847457627118,
+       "grad_norm": 4.006308078765869,
+       "learning_rate": 9.851694915254237e-06,
+       "loss": 1.8852,
+       "step": 380
+     },
+     {
+       "epoch": 0.826271186440678,
+       "grad_norm": 3.3026678562164307,
+       "learning_rate": 8.792372881355933e-06,
+       "loss": 1.9024,
+       "step": 390
+     },
+     {
+       "epoch": 0.847457627118644,
+       "grad_norm": 3.574389696121216,
+       "learning_rate": 7.733050847457628e-06,
+       "loss": 1.8935,
+       "step": 400
+     },
+     {
+       "epoch": 0.8686440677966102,
+       "grad_norm": 3.5349714756011963,
+       "learning_rate": 6.6737288135593225e-06,
+       "loss": 1.8763,
+       "step": 410
+     },
+     {
+       "epoch": 0.8898305084745762,
+       "grad_norm": 3.3055741786956787,
+       "learning_rate": 5.614406779661018e-06,
+       "loss": 1.9133,
+       "step": 420
+     },
+     {
+       "epoch": 0.9110169491525424,
+       "grad_norm": 5.87731409072876,
+       "learning_rate": 4.5550847457627115e-06,
+       "loss": 1.9598,
+       "step": 430
+     },
+     {
+       "epoch": 0.9322033898305084,
+       "grad_norm": 3.8937673568725586,
+       "learning_rate": 3.495762711864407e-06,
+       "loss": 1.8699,
+       "step": 440
+     },
+     {
+       "epoch": 0.9533898305084746,
+       "grad_norm": 3.4998295307159424,
+       "learning_rate": 2.436440677966102e-06,
+       "loss": 1.9595,
+       "step": 450
+     },
+     {
+       "epoch": 0.9745762711864406,
+       "grad_norm": 3.4720184803009033,
+       "learning_rate": 1.3771186440677967e-06,
+       "loss": 1.8245,
+       "step": 460
+     },
+     {
+       "epoch": 0.9957627118644068,
+       "grad_norm": 3.557469367980957,
+       "learning_rate": 3.1779661016949154e-07,
+       "loss": 2.0716,
+       "step": 470
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 472,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1823897464012800.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
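trainer_state.json records the Trainer's log_history: loss, learning rate and grad norm every 10 steps across a single epoch of 472 steps (the logged train loss falls from about 3.00 to the 1.8-2.0 range). A small standard-library sketch for pulling the loss curve out of the file:

```python
# Sketch: extract (step, loss) pairs from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(f"{len(points)} logged points over {state['global_step']} steps")  # 47 over 472
print(points[:3])  # [(10, 3.0019), (20, 2.6502), (30, 2.4454)]
```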
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51c24a2872deda9cf599d0d42271894133d8d01e8bf90b7be8cdf72e46047b23
+ size 5304
vocab.json ADDED
The diff for this file is too large to render. See raw diff