tyzhu committed
Commit 22bc438 · verified · 1 Parent(s): 4a3ac5a

End of training

Files changed (6)
  1. README.md +14 -2
  2. all_results.json +15 -0
  3. eval_results.json +10 -0
  4. tokenizer.json +1 -6
  5. train_results.json +8 -0
  6. trainer_state.json +616 -0
README.md CHANGED
@@ -3,11 +3,23 @@ license: llama2
 base_model: meta-llama/Llama-2-7b-hf
 tags:
 - generated_from_trainer
+datasets:
+- tyzhu/lmind_hotpot_train8000_eval7405_v1_docidx
 metrics:
 - accuracy
 model-index:
 - name: lmind_hotpot_train8000_eval7405_v1_docidx_meta-llama_Llama-2-7b-hf_lora2
-  results: []
+  results:
+  - task:
+      name: Causal Language Modeling
+      type: text-generation
+    dataset:
+      name: tyzhu/lmind_hotpot_train8000_eval7405_v1_docidx
+      type: tyzhu/lmind_hotpot_train8000_eval7405_v1_docidx
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.7928508287292818
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,7 +27,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # lmind_hotpot_train8000_eval7405_v1_docidx_meta-llama_Llama-2-7b-hf_lora2
 
-This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on the tyzhu/lmind_hotpot_train8000_eval7405_v1_docidx dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.7676
 - Accuracy: 0.7929
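
Note: the model name and the `generated_from_trainer` tag suggest this repo holds a LoRA (PEFT) adapter on top of the Llama-2-7b base. A minimal loading sketch under that assumption; the adapter repo id below is inferred from the model name and may differ:

```python
# Hypothetical loading sketch -- assumes this repo contains a PEFT LoRA adapter
# trained on top of meta-llama/Llama-2-7b-hf; adjust the adapter id if it differs.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-2-7b-hf"
adapter_id = "tyzhu/lmind_hotpot_train8000_eval7405_v1_docidx_meta-llama_Llama-2-7b-hf_lora2"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base_model, adapter_id)  # attach the LoRA weights

inputs = tokenizer("Which magazine was started first, Arthur's Magazine or First for Women?", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0], skip_special_tokens=True))
```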
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 10.0,
+    "eval_accuracy": 0.7928508287292818,
+    "eval_loss": 0.7676272392272949,
+    "eval_runtime": 8.241,
+    "eval_samples": 500,
+    "eval_samples_per_second": 60.672,
+    "eval_steps_per_second": 7.645,
+    "perplexity": 2.1546477200691716,
+    "train_loss": 0.750629533715413,
+    "train_runtime": 6999.9579,
+    "train_samples": 26854,
+    "train_samples_per_second": 38.363,
+    "train_steps_per_second": 1.199
+}
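
Note: the derived fields above appear to be internally consistent; `perplexity` looks like exp(eval_loss) and the throughput fields follow from samples and runtime. A quick check, using only the numbers reported in all_results.json:

```python
import math

eval_loss = 0.7676272392272949
print(math.exp(eval_loss))          # ~2.1546, matches the reported "perplexity"

eval_samples, eval_runtime = 500, 8.241
print(eval_samples / eval_runtime)  # ~60.67, matches "eval_samples_per_second"
```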
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 10.0,
+    "eval_accuracy": 0.7928508287292818,
+    "eval_loss": 0.7676272392272949,
+    "eval_runtime": 8.241,
+    "eval_samples": 500,
+    "eval_samples_per_second": 60.672,
+    "eval_steps_per_second": 7.645,
+    "perplexity": 2.1546477200691716
+}
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
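
Note: this change drops the saved right-side truncation at 1024 tokens and leaves `"truncation": null`, so the tokenizer no longer truncates by default. If the old behavior is wanted, it can still be requested per call; a sketch, assuming a transformers fast tokenizer loaded from this repo (hypothetical repo id, as above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "tyzhu/lmind_hotpot_train8000_eval7405_v1_docidx_meta-llama_Llama-2-7b-hf_lora2"  # assumed repo id
)

# With "truncation": null in tokenizer.json nothing is truncated by default;
# the previous behavior (truncate on the right at 1024 tokens) per call:
enc = tok("some very long document ...", truncation=True, max_length=1024)
print(len(enc["input_ids"]))  # at most 1024
```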
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 10.0,
+    "train_loss": 0.750629533715413,
+    "train_runtime": 6999.9579,
+    "train_samples": 26854,
+    "train_samples_per_second": 38.363,
+    "train_steps_per_second": 1.199
+}
trainer_state.json ADDED
@@ -0,0 +1,616 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 9.997021149836163,
+  "eval_steps": 500,
+  "global_step": 8390,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.12,
+      "learning_rate": 0.0001,
+      "loss": 1.2512,
+      "step": 100
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0001,
+      "loss": 1.1216,
+      "step": 200
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0001,
+      "loss": 1.1222,
+      "step": 300
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0001,
+      "loss": 1.1035,
+      "step": 400
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 0.0001,
+      "loss": 1.1224,
+      "step": 500
+    },
+    {
+      "epoch": 0.71,
+      "learning_rate": 0.0001,
+      "loss": 1.1125,
+      "step": 600
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 0.0001,
+      "loss": 1.1132,
+      "step": 700
+    },
+    {
+      "epoch": 0.95,
+      "learning_rate": 0.0001,
+      "loss": 1.108,
+      "step": 800
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7537090239410681,
+      "eval_loss": 1.3251845836639404,
+      "eval_runtime": 7.4484,
+      "eval_samples_per_second": 67.128,
+      "eval_steps_per_second": 8.458,
+      "step": 839
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 0.0001,
+      "loss": 1.0901,
+      "step": 900
+    },
+    {
+      "epoch": 1.19,
+      "learning_rate": 0.0001,
+      "loss": 1.0699,
+      "step": 1000
+    },
+    {
+      "epoch": 1.31,
+      "learning_rate": 0.0001,
+      "loss": 1.0637,
+      "step": 1100
+    },
+    {
+      "epoch": 1.43,
+      "learning_rate": 0.0001,
+      "loss": 1.0632,
+      "step": 1200
+    },
+    {
+      "epoch": 1.55,
+      "learning_rate": 0.0001,
+      "loss": 1.0595,
+      "step": 1300
+    },
+    {
+      "epoch": 1.67,
+      "learning_rate": 0.0001,
+      "loss": 1.0783,
+      "step": 1400
+    },
+    {
+      "epoch": 1.79,
+      "learning_rate": 0.0001,
+      "loss": 1.0591,
+      "step": 1500
+    },
+    {
+      "epoch": 1.91,
+      "learning_rate": 0.0001,
+      "loss": 1.0489,
+      "step": 1600
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.7580478821362799,
+      "eval_loss": 1.2689599990844727,
+      "eval_runtime": 8.4463,
+      "eval_samples_per_second": 59.197,
+      "eval_steps_per_second": 7.459,
+      "step": 1678
+    },
+    {
+      "epoch": 2.03,
+      "learning_rate": 0.0001,
+      "loss": 1.0488,
+      "step": 1700
+    },
+    {
+      "epoch": 2.14,
+      "learning_rate": 0.0001,
+      "loss": 0.9965,
+      "step": 1800
+    },
+    {
+      "epoch": 2.26,
+      "learning_rate": 0.0001,
+      "loss": 0.9732,
+      "step": 1900
+    },
+    {
+      "epoch": 2.38,
+      "learning_rate": 0.0001,
+      "loss": 0.99,
+      "step": 2000
+    },
+    {
+      "epoch": 2.5,
+      "learning_rate": 0.0001,
+      "loss": 0.9894,
+      "step": 2100
+    },
+    {
+      "epoch": 2.62,
+      "learning_rate": 0.0001,
+      "loss": 0.9778,
+      "step": 2200
+    },
+    {
+      "epoch": 2.74,
+      "learning_rate": 0.0001,
+      "loss": 0.9737,
+      "step": 2300
+    },
+    {
+      "epoch": 2.86,
+      "learning_rate": 0.0001,
+      "loss": 0.9884,
+      "step": 2400
+    },
+    {
+      "epoch": 2.98,
+      "learning_rate": 0.0001,
+      "loss": 0.9812,
+      "step": 2500
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.763097605893186,
+      "eval_loss": 1.160219669342041,
+      "eval_runtime": 8.5129,
+      "eval_samples_per_second": 58.735,
+      "eval_steps_per_second": 7.401,
+      "step": 2517
+    },
+    {
+      "epoch": 3.1,
+      "learning_rate": 0.0001,
+      "loss": 0.9032,
+      "step": 2600
+    },
+    {
+      "epoch": 3.22,
+      "learning_rate": 0.0001,
+      "loss": 0.8814,
+      "step": 2700
+    },
+    {
+      "epoch": 3.34,
+      "learning_rate": 0.0001,
+      "loss": 0.875,
+      "step": 2800
+    },
+    {
+      "epoch": 3.46,
+      "learning_rate": 0.0001,
+      "loss": 0.8893,
+      "step": 2900
+    },
+    {
+      "epoch": 3.57,
+      "learning_rate": 0.0001,
+      "loss": 0.8892,
+      "step": 3000
+    },
+    {
+      "epoch": 3.69,
+      "learning_rate": 0.0001,
+      "loss": 0.8681,
+      "step": 3100
+    },
+    {
+      "epoch": 3.81,
+      "learning_rate": 0.0001,
+      "loss": 0.892,
+      "step": 3200
+    },
+    {
+      "epoch": 3.93,
+      "learning_rate": 0.0001,
+      "loss": 0.902,
+      "step": 3300
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.767915285451197,
+      "eval_loss": 1.098110556602478,
+      "eval_runtime": 8.2233,
+      "eval_samples_per_second": 60.803,
+      "eval_steps_per_second": 7.661,
+      "step": 3357
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 0.0001,
+      "loss": 0.8432,
+      "step": 3400
+    },
+    {
+      "epoch": 4.17,
+      "learning_rate": 0.0001,
+      "loss": 0.7736,
+      "step": 3500
+    },
+    {
+      "epoch": 4.29,
+      "learning_rate": 0.0001,
+      "loss": 0.7731,
+      "step": 3600
+    },
+    {
+      "epoch": 4.41,
+      "learning_rate": 0.0001,
+      "loss": 0.7997,
+      "step": 3700
+    },
+    {
+      "epoch": 4.53,
+      "learning_rate": 0.0001,
+      "loss": 0.7945,
+      "step": 3800
+    },
+    {
+      "epoch": 4.65,
+      "learning_rate": 0.0001,
+      "loss": 0.7807,
+      "step": 3900
+    },
+    {
+      "epoch": 4.77,
+      "learning_rate": 0.0001,
+      "loss": 0.8013,
+      "step": 4000
+    },
+    {
+      "epoch": 4.89,
+      "learning_rate": 0.0001,
+      "loss": 0.8047,
+      "step": 4100
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.7726629834254144,
+      "eval_loss": 1.0106405019760132,
+      "eval_runtime": 8.385,
+      "eval_samples_per_second": 59.631,
+      "eval_steps_per_second": 7.513,
+      "step": 4196
+    },
+    {
+      "epoch": 5.0,
+      "learning_rate": 0.0001,
+      "loss": 0.7922,
+      "step": 4200
+    },
+    {
+      "epoch": 5.12,
+      "learning_rate": 0.0001,
+      "loss": 0.6729,
+      "step": 4300
+    },
+    {
+      "epoch": 5.24,
+      "learning_rate": 0.0001,
+      "loss": 0.6862,
+      "step": 4400
+    },
+    {
+      "epoch": 5.36,
+      "learning_rate": 0.0001,
+      "loss": 0.7017,
+      "step": 4500
+    },
+    {
+      "epoch": 5.48,
+      "learning_rate": 0.0001,
+      "loss": 0.7026,
+      "step": 4600
+    },
+    {
+      "epoch": 5.6,
+      "learning_rate": 0.0001,
+      "loss": 0.696,
+      "step": 4700
+    },
+    {
+      "epoch": 5.72,
+      "learning_rate": 0.0001,
+      "loss": 0.6963,
+      "step": 4800
+    },
+    {
+      "epoch": 5.84,
+      "learning_rate": 0.0001,
+      "loss": 0.6963,
+      "step": 4900
+    },
+    {
+      "epoch": 5.96,
+      "learning_rate": 0.0001,
+      "loss": 0.7028,
+      "step": 5000
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.7776611418047882,
+      "eval_loss": 0.9447547793388367,
+      "eval_runtime": 7.5604,
+      "eval_samples_per_second": 66.134,
+      "eval_steps_per_second": 8.333,
+      "step": 5035
+    },
+    {
+      "epoch": 6.08,
+      "learning_rate": 0.0001,
+      "loss": 0.645,
+      "step": 5100
+    },
+    {
+      "epoch": 6.2,
+      "learning_rate": 0.0001,
+      "loss": 0.5865,
+      "step": 5200
+    },
+    {
+      "epoch": 6.32,
+      "learning_rate": 0.0001,
+      "loss": 0.5926,
+      "step": 5300
+    },
+    {
+      "epoch": 6.43,
+      "learning_rate": 0.0001,
+      "loss": 0.6205,
+      "step": 5400
+    },
+    {
+      "epoch": 6.55,
+      "learning_rate": 0.0001,
+      "loss": 0.5919,
+      "step": 5500
+    },
+    {
+      "epoch": 6.67,
+      "learning_rate": 0.0001,
+      "loss": 0.6014,
+      "step": 5600
+    },
+    {
+      "epoch": 6.79,
+      "learning_rate": 0.0001,
+      "loss": 0.6239,
+      "step": 5700
+    },
+    {
+      "epoch": 6.91,
+      "learning_rate": 0.0001,
+      "loss": 0.6141,
+      "step": 5800
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.7818011049723756,
+      "eval_loss": 0.87887042760849,
+      "eval_runtime": 8.2091,
+      "eval_samples_per_second": 60.908,
+      "eval_steps_per_second": 7.674,
+      "step": 5874
+    },
+    {
+      "epoch": 7.03,
+      "learning_rate": 0.0001,
+      "loss": 0.5949,
+      "step": 5900
+    },
+    {
+      "epoch": 7.15,
+      "learning_rate": 0.0001,
+      "loss": 0.4956,
+      "step": 6000
+    },
+    {
+      "epoch": 7.27,
+      "learning_rate": 0.0001,
+      "loss": 0.5174,
+      "step": 6100
+    },
+    {
+      "epoch": 7.39,
+      "learning_rate": 0.0001,
+      "loss": 0.5071,
+      "step": 6200
+    },
+    {
+      "epoch": 7.51,
+      "learning_rate": 0.0001,
+      "loss": 0.5261,
+      "step": 6300
+    },
+    {
+      "epoch": 7.63,
+      "learning_rate": 0.0001,
+      "loss": 0.5203,
+      "step": 6400
+    },
+    {
+      "epoch": 7.75,
+      "learning_rate": 0.0001,
+      "loss": 0.5369,
+      "step": 6500
+    },
+    {
+      "epoch": 7.86,
+      "learning_rate": 0.0001,
+      "loss": 0.5414,
+      "step": 6600
+    },
+    {
+      "epoch": 7.98,
+      "learning_rate": 0.0001,
+      "loss": 0.5393,
+      "step": 6700
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.7858710865561694,
+      "eval_loss": 0.8736603856086731,
+      "eval_runtime": 7.5811,
+      "eval_samples_per_second": 65.954,
+      "eval_steps_per_second": 8.31,
+      "step": 6714
+    },
+    {
+      "epoch": 8.1,
+      "learning_rate": 0.0001,
+      "loss": 0.448,
+      "step": 6800
+    },
+    {
+      "epoch": 8.22,
+      "learning_rate": 0.0001,
+      "loss": 0.4347,
+      "step": 6900
+    },
+    {
+      "epoch": 8.34,
+      "learning_rate": 0.0001,
+      "loss": 0.4397,
+      "step": 7000
+    },
+    {
+      "epoch": 8.46,
+      "learning_rate": 0.0001,
+      "loss": 0.4468,
+      "step": 7100
+    },
+    {
+      "epoch": 8.58,
+      "learning_rate": 0.0001,
+      "loss": 0.4477,
+      "step": 7200
+    },
+    {
+      "epoch": 8.7,
+      "learning_rate": 0.0001,
+      "loss": 0.449,
+      "step": 7300
+    },
+    {
+      "epoch": 8.82,
+      "learning_rate": 0.0001,
+      "loss": 0.4498,
+      "step": 7400
+    },
+    {
+      "epoch": 8.94,
+      "learning_rate": 0.0001,
+      "loss": 0.4595,
+      "step": 7500
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.7895580110497238,
+      "eval_loss": 0.8018783926963806,
+      "eval_runtime": 8.3817,
+      "eval_samples_per_second": 59.654,
+      "eval_steps_per_second": 7.516,
+      "step": 7553
+    },
+    {
+      "epoch": 9.06,
+      "learning_rate": 0.0001,
+      "loss": 0.4064,
+      "step": 7600
+    },
+    {
+      "epoch": 9.17,
+      "learning_rate": 0.0001,
+      "loss": 0.3644,
+      "step": 7700
+    },
+    {
+      "epoch": 9.29,
+      "learning_rate": 0.0001,
+      "loss": 0.3693,
+      "step": 7800
+    },
+    {
+      "epoch": 9.41,
+      "learning_rate": 0.0001,
+      "loss": 0.3776,
+      "step": 7900
+    },
+    {
+      "epoch": 9.53,
+      "learning_rate": 0.0001,
+      "loss": 0.3877,
+      "step": 8000
+    },
+    {
+      "epoch": 9.65,
+      "learning_rate": 0.0001,
+      "loss": 0.3802,
+      "step": 8100
+    },
+    {
+      "epoch": 9.77,
+      "learning_rate": 0.0001,
+      "loss": 0.3881,
+      "step": 8200
+    },
+    {
+      "epoch": 9.89,
+      "learning_rate": 0.0001,
+      "loss": 0.3927,
+      "step": 8300
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.7928508287292818,
+      "eval_loss": 0.7676272392272949,
+      "eval_runtime": 8.6003,
+      "eval_samples_per_second": 58.138,
+      "eval_steps_per_second": 7.325,
+      "step": 8390
+    },
+    {
+      "epoch": 10.0,
+      "step": 8390,
+      "total_flos": 1.7650637705911992e+18,
+      "train_loss": 0.750629533715413,
+      "train_runtime": 6999.9579,
+      "train_samples_per_second": 38.363,
+      "train_steps_per_second": 1.199
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 8390,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "total_flos": 1.7650637705911992e+18,
+  "trial_name": null,
+  "trial_params": null
+}
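
Note: the per-epoch evaluation metrics are interleaved with the step-level training logs in `log_history`. A small sketch for pulling them out, assuming a local copy of the trainer_state.json added in this commit:

```python
import json

# Assumes trainer_state.json (as added in this commit) has been downloaded locally.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the end-of-epoch evaluation records and print loss/accuracy per epoch.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'epoch {entry["epoch"]:>5}: '
              f'eval_loss={entry["eval_loss"]:.4f}, '
              f'eval_accuracy={entry["eval_accuracy"]:.4f}')
```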