flatala-research committed on
Commit f641976
1 Parent(s): 4a0b6fd

End of training

Files changed (3)
  1. all_results.json +8 -0
  2. test_results.json +8 -0
  3. trainer_state.json +783 -0
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 7.117521367521367,
+ "eval_accuracy": 0.5812807881773399,
+ "eval_loss": 1.3488672971725464,
+ "eval_runtime": 14.5651,
+ "eval_samples_per_second": 13.937,
+ "eval_steps_per_second": 1.785
+ }
test_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 7.117521367521367,
+ "eval_accuracy": 0.5812807881773399,
+ "eval_loss": 1.3488672971725464,
+ "eval_runtime": 14.5651,
+ "eval_samples_per_second": 13.937,
+ "eval_steps_per_second": 1.785
+ }
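The two files above carry the final evaluation metrics of the fine-tuning run. A minimal sketch of how such files are typically written with the Hugging Face `Trainer` follows; it is not the exact script behind this commit, and the `trainer`/`test_dataset` objects are placeholders for whatever the actual run used.

```python
# Sketch only: the calls that usually emit test_results.json,
# all_results.json, and trainer_state.json at the end of a Trainer run.
from transformers import Trainer


def evaluate_and_save(trainer: Trainer, test_dataset) -> dict:
    """Run the final test evaluation and write the result files."""
    # eval_accuracy appears only if the Trainer was given a
    # compute_metrics function that reports "accuracy".
    metrics = trainer.evaluate(eval_dataset=test_dataset)
    trainer.log_metrics("test", metrics)    # prints the metrics block to the log
    trainer.save_metrics("test", metrics)   # writes test_results.json and updates all_results.json
    trainer.save_state()                    # writes trainer_state.json (log_history, best checkpoint, ...)
    return metrics
```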
trainer_state.json ADDED
@@ -0,0 +1,783 @@
+ {
+ "best_metric": 0.6078431372549019,
+ "best_model_checkpoint": "videomae-base-finetuned-kinetics-finetuned-right-hand-conflab-v3/checkpoint-826",
+ "epoch": 7.117521367521367,
+ "eval_steps": 500,
+ "global_step": 936,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.010683760683760684,
+ "grad_norm": 7.551811218261719,
+ "learning_rate": 5.319148936170213e-06,
+ "loss": 2.0365,
+ "step": 10
+ },
+ {
+ "epoch": 0.021367521367521368,
+ "grad_norm": 6.67141580581665,
+ "learning_rate": 1.0638297872340426e-05,
+ "loss": 2.0409,
+ "step": 20
+ },
+ {
+ "epoch": 0.03205128205128205,
+ "grad_norm": 7.671868801116943,
+ "learning_rate": 1.595744680851064e-05,
+ "loss": 2.0454,
+ "step": 30
+ },
+ {
+ "epoch": 0.042735042735042736,
+ "grad_norm": 8.299155235290527,
+ "learning_rate": 2.1276595744680852e-05,
+ "loss": 1.9744,
+ "step": 40
+ },
+ {
+ "epoch": 0.053418803418803416,
+ "grad_norm": 8.462949752807617,
+ "learning_rate": 2.6595744680851064e-05,
+ "loss": 1.9464,
+ "step": 50
+ },
+ {
+ "epoch": 0.0641025641025641,
+ "grad_norm": 10.244002342224121,
+ "learning_rate": 3.191489361702128e-05,
+ "loss": 1.937,
+ "step": 60
+ },
+ {
+ "epoch": 0.07478632478632478,
+ "grad_norm": 5.489703178405762,
+ "learning_rate": 3.723404255319149e-05,
+ "loss": 1.8308,
+ "step": 70
+ },
+ {
+ "epoch": 0.08547008547008547,
+ "grad_norm": 9.307482719421387,
+ "learning_rate": 4.2553191489361704e-05,
+ "loss": 1.9194,
+ "step": 80
+ },
+ {
+ "epoch": 0.09615384615384616,
+ "grad_norm": 6.383068084716797,
+ "learning_rate": 4.787234042553192e-05,
+ "loss": 1.8826,
+ "step": 90
+ },
+ {
+ "epoch": 0.10683760683760683,
+ "grad_norm": 7.77570915222168,
+ "learning_rate": 4.96437054631829e-05,
+ "loss": 1.9247,
+ "step": 100
+ },
+ {
+ "epoch": 0.11752136752136752,
+ "grad_norm": 5.738762378692627,
+ "learning_rate": 4.90498812351544e-05,
+ "loss": 1.9096,
+ "step": 110
+ },
+ {
+ "epoch": 0.12606837606837606,
+ "eval_accuracy": 0.3333333333333333,
+ "eval_loss": 1.688755750656128,
+ "eval_runtime": 17.8148,
+ "eval_samples_per_second": 11.451,
+ "eval_steps_per_second": 1.459,
+ "step": 118
+ },
+ {
+ "epoch": 1.0021367521367521,
+ "grad_norm": 6.397932052612305,
+ "learning_rate": 4.845605700712589e-05,
+ "loss": 1.7919,
+ "step": 120
+ },
+ {
+ "epoch": 1.0128205128205128,
+ "grad_norm": 6.429988384246826,
+ "learning_rate": 4.7862232779097386e-05,
+ "loss": 1.7007,
+ "step": 130
+ },
+ {
+ "epoch": 1.0235042735042734,
+ "grad_norm": 9.103757858276367,
+ "learning_rate": 4.7268408551068886e-05,
+ "loss": 1.8668,
+ "step": 140
+ },
+ {
+ "epoch": 1.0341880341880343,
+ "grad_norm": 8.578359603881836,
+ "learning_rate": 4.667458432304038e-05,
+ "loss": 1.8187,
+ "step": 150
+ },
+ {
+ "epoch": 1.044871794871795,
+ "grad_norm": 9.773784637451172,
+ "learning_rate": 4.6080760095011874e-05,
+ "loss": 1.7984,
+ "step": 160
+ },
+ {
+ "epoch": 1.0555555555555556,
+ "grad_norm": 8.500809669494629,
+ "learning_rate": 4.5486935866983374e-05,
+ "loss": 1.6211,
+ "step": 170
+ },
+ {
+ "epoch": 1.0662393162393162,
+ "grad_norm": 5.273374557495117,
+ "learning_rate": 4.4893111638954874e-05,
+ "loss": 1.6033,
+ "step": 180
+ },
+ {
+ "epoch": 1.0769230769230769,
+ "grad_norm": 9.878400802612305,
+ "learning_rate": 4.429928741092637e-05,
+ "loss": 1.6822,
+ "step": 190
+ },
+ {
+ "epoch": 1.0876068376068375,
+ "grad_norm": 9.135309219360352,
+ "learning_rate": 4.370546318289787e-05,
+ "loss": 1.5063,
+ "step": 200
+ },
+ {
+ "epoch": 1.0982905982905984,
+ "grad_norm": 7.596296787261963,
+ "learning_rate": 4.311163895486936e-05,
+ "loss": 1.4128,
+ "step": 210
+ },
+ {
+ "epoch": 1.108974358974359,
+ "grad_norm": 7.444425582885742,
+ "learning_rate": 4.2517814726840856e-05,
+ "loss": 1.6052,
+ "step": 220
+ },
+ {
+ "epoch": 1.1196581196581197,
+ "grad_norm": 8.690657615661621,
+ "learning_rate": 4.1923990498812356e-05,
+ "loss": 1.628,
+ "step": 230
+ },
+ {
+ "epoch": 1.126068376068376,
+ "eval_accuracy": 0.4117647058823529,
+ "eval_loss": 1.6285208463668823,
+ "eval_runtime": 14.5186,
+ "eval_samples_per_second": 14.051,
+ "eval_steps_per_second": 1.791,
+ "step": 236
+ },
+ {
+ "epoch": 2.0042735042735043,
+ "grad_norm": 6.66658878326416,
+ "learning_rate": 4.133016627078385e-05,
+ "loss": 1.5841,
+ "step": 240
+ },
+ {
+ "epoch": 2.014957264957265,
+ "grad_norm": 7.023474216461182,
+ "learning_rate": 4.073634204275535e-05,
+ "loss": 1.3114,
+ "step": 250
+ },
+ {
+ "epoch": 2.0256410256410255,
+ "grad_norm": 5.985867500305176,
+ "learning_rate": 4.0142517814726843e-05,
+ "loss": 1.3695,
+ "step": 260
+ },
+ {
+ "epoch": 2.036324786324786,
+ "grad_norm": 6.7717719078063965,
+ "learning_rate": 3.954869358669834e-05,
+ "loss": 1.4598,
+ "step": 270
+ },
+ {
+ "epoch": 2.047008547008547,
+ "grad_norm": 12.264374732971191,
+ "learning_rate": 3.895486935866984e-05,
+ "loss": 1.337,
+ "step": 280
+ },
+ {
+ "epoch": 2.0576923076923075,
+ "grad_norm": 10.283348083496094,
+ "learning_rate": 3.836104513064133e-05,
+ "loss": 1.4704,
+ "step": 290
+ },
+ {
+ "epoch": 2.0683760683760686,
+ "grad_norm": 9.656314849853516,
+ "learning_rate": 3.7767220902612825e-05,
+ "loss": 1.5234,
+ "step": 300
+ },
+ {
+ "epoch": 2.0790598290598292,
+ "grad_norm": 7.528780937194824,
+ "learning_rate": 3.7173396674584325e-05,
+ "loss": 1.4436,
+ "step": 310
+ },
+ {
+ "epoch": 2.08974358974359,
+ "grad_norm": 8.336195945739746,
+ "learning_rate": 3.657957244655582e-05,
+ "loss": 1.2449,
+ "step": 320
+ },
+ {
+ "epoch": 2.1004273504273505,
+ "grad_norm": 10.961182594299316,
+ "learning_rate": 3.598574821852731e-05,
+ "loss": 1.4239,
+ "step": 330
+ },
+ {
+ "epoch": 2.111111111111111,
+ "grad_norm": 7.471282482147217,
+ "learning_rate": 3.539192399049881e-05,
+ "loss": 1.4062,
+ "step": 340
+ },
+ {
+ "epoch": 2.121794871794872,
+ "grad_norm": 8.72683334350586,
+ "learning_rate": 3.479809976247031e-05,
+ "loss": 1.3656,
+ "step": 350
+ },
+ {
+ "epoch": 2.126068376068376,
+ "eval_accuracy": 0.5147058823529411,
+ "eval_loss": 1.3947311639785767,
+ "eval_runtime": 14.5916,
+ "eval_samples_per_second": 13.981,
+ "eval_steps_per_second": 1.782,
+ "step": 354
+ },
+ {
+ "epoch": 3.0064102564102564,
+ "grad_norm": 10.454861640930176,
+ "learning_rate": 3.4204275534441806e-05,
+ "loss": 1.1968,
+ "step": 360
+ },
+ {
+ "epoch": 3.017094017094017,
+ "grad_norm": 7.894044876098633,
+ "learning_rate": 3.361045130641331e-05,
+ "loss": 1.0705,
+ "step": 370
+ },
+ {
+ "epoch": 3.0277777777777777,
+ "grad_norm": 11.178865432739258,
+ "learning_rate": 3.30166270783848e-05,
+ "loss": 1.0553,
+ "step": 380
+ },
+ {
+ "epoch": 3.0384615384615383,
+ "grad_norm": 13.55367374420166,
+ "learning_rate": 3.24228028503563e-05,
+ "loss": 0.8611,
+ "step": 390
+ },
+ {
+ "epoch": 3.049145299145299,
+ "grad_norm": 11.7691650390625,
+ "learning_rate": 3.1828978622327794e-05,
+ "loss": 1.1736,
+ "step": 400
+ },
+ {
+ "epoch": 3.0598290598290596,
+ "grad_norm": 9.843459129333496,
+ "learning_rate": 3.123515439429929e-05,
+ "loss": 0.9042,
+ "step": 410
+ },
+ {
+ "epoch": 3.0705128205128207,
+ "grad_norm": 12.834084510803223,
+ "learning_rate": 3.064133016627079e-05,
+ "loss": 1.0035,
+ "step": 420
+ },
+ {
+ "epoch": 3.0811965811965814,
+ "grad_norm": 11.492711067199707,
+ "learning_rate": 3.0047505938242282e-05,
+ "loss": 1.0545,
+ "step": 430
+ },
+ {
+ "epoch": 3.091880341880342,
+ "grad_norm": 11.115935325622559,
+ "learning_rate": 2.9453681710213776e-05,
+ "loss": 1.046,
+ "step": 440
+ },
+ {
+ "epoch": 3.1025641025641026,
+ "grad_norm": 11.434919357299805,
+ "learning_rate": 2.8859857482185276e-05,
+ "loss": 0.9765,
+ "step": 450
+ },
+ {
+ "epoch": 3.1132478632478633,
+ "grad_norm": 13.18517017364502,
+ "learning_rate": 2.826603325415677e-05,
+ "loss": 1.0194,
+ "step": 460
+ },
+ {
+ "epoch": 3.123931623931624,
+ "grad_norm": 9.168006896972656,
+ "learning_rate": 2.7672209026128266e-05,
+ "loss": 1.1498,
+ "step": 470
+ },
+ {
+ "epoch": 3.126068376068376,
+ "eval_accuracy": 0.5735294117647058,
+ "eval_loss": 1.2639243602752686,
+ "eval_runtime": 15.2962,
+ "eval_samples_per_second": 13.337,
+ "eval_steps_per_second": 1.7,
+ "step": 472
+ },
+ {
+ "epoch": 4.0085470085470085,
+ "grad_norm": 10.556476593017578,
+ "learning_rate": 2.7078384798099763e-05,
+ "loss": 0.7885,
+ "step": 480
+ },
+ {
+ "epoch": 4.019230769230769,
+ "grad_norm": 9.74152660369873,
+ "learning_rate": 2.648456057007126e-05,
+ "loss": 0.6999,
+ "step": 490
+ },
+ {
+ "epoch": 4.02991452991453,
+ "grad_norm": 8.711644172668457,
+ "learning_rate": 2.5890736342042754e-05,
+ "loss": 0.7566,
+ "step": 500
+ },
+ {
+ "epoch": 4.0405982905982905,
+ "grad_norm": 13.662306785583496,
+ "learning_rate": 2.5296912114014254e-05,
+ "loss": 0.7932,
+ "step": 510
+ },
+ {
+ "epoch": 4.051282051282051,
+ "grad_norm": 9.280159950256348,
+ "learning_rate": 2.4703087885985748e-05,
+ "loss": 0.7025,
+ "step": 520
+ },
+ {
+ "epoch": 4.061965811965812,
+ "grad_norm": 7.840063095092773,
+ "learning_rate": 2.4109263657957245e-05,
+ "loss": 0.6106,
+ "step": 530
+ },
+ {
+ "epoch": 4.072649572649572,
+ "grad_norm": 3.8308825492858887,
+ "learning_rate": 2.3515439429928742e-05,
+ "loss": 0.6493,
+ "step": 540
+ },
+ {
+ "epoch": 4.083333333333333,
+ "grad_norm": 13.45524787902832,
+ "learning_rate": 2.292161520190024e-05,
+ "loss": 1.0321,
+ "step": 550
+ },
+ {
+ "epoch": 4.094017094017094,
+ "grad_norm": 13.148232460021973,
+ "learning_rate": 2.2327790973871736e-05,
+ "loss": 0.756,
+ "step": 560
+ },
+ {
+ "epoch": 4.104700854700854,
+ "grad_norm": 6.532124042510986,
+ "learning_rate": 2.1733966745843233e-05,
+ "loss": 0.8232,
+ "step": 570
+ },
+ {
+ "epoch": 4.115384615384615,
+ "grad_norm": 8.199164390563965,
+ "learning_rate": 2.114014251781473e-05,
+ "loss": 0.6284,
+ "step": 580
+ },
+ {
+ "epoch": 4.1260683760683765,
+ "grad_norm": 16.024168014526367,
+ "learning_rate": 2.0546318289786223e-05,
+ "loss": 0.6546,
+ "step": 590
+ },
+ {
+ "epoch": 4.1260683760683765,
+ "eval_accuracy": 0.5882352941176471,
+ "eval_loss": 1.2053728103637695,
+ "eval_runtime": 14.7556,
+ "eval_samples_per_second": 13.825,
+ "eval_steps_per_second": 1.762,
+ "step": 590
+ },
+ {
+ "epoch": 5.010683760683761,
+ "grad_norm": 8.120397567749023,
+ "learning_rate": 1.995249406175772e-05,
+ "loss": 0.5882,
+ "step": 600
+ },
+ {
+ "epoch": 5.021367521367521,
+ "grad_norm": 8.652383804321289,
+ "learning_rate": 1.9358669833729217e-05,
+ "loss": 0.584,
+ "step": 610
+ },
+ {
+ "epoch": 5.032051282051282,
+ "grad_norm": 13.461187362670898,
+ "learning_rate": 1.876484560570071e-05,
+ "loss": 0.6268,
+ "step": 620
+ },
+ {
+ "epoch": 5.042735042735043,
+ "grad_norm": 4.858776569366455,
+ "learning_rate": 1.8171021377672208e-05,
+ "loss": 0.503,
+ "step": 630
+ },
+ {
+ "epoch": 5.053418803418803,
+ "grad_norm": 9.537641525268555,
+ "learning_rate": 1.7577197149643705e-05,
+ "loss": 0.5206,
+ "step": 640
+ },
+ {
+ "epoch": 5.064102564102564,
+ "grad_norm": 12.624528884887695,
+ "learning_rate": 1.6983372921615205e-05,
+ "loss": 0.4366,
+ "step": 650
+ },
+ {
+ "epoch": 5.0747863247863245,
+ "grad_norm": 12.749320030212402,
+ "learning_rate": 1.63895486935867e-05,
+ "loss": 0.4381,
+ "step": 660
+ },
+ {
+ "epoch": 5.085470085470085,
+ "grad_norm": 10.724397659301758,
+ "learning_rate": 1.5795724465558196e-05,
+ "loss": 0.3677,
+ "step": 670
+ },
+ {
+ "epoch": 5.096153846153846,
+ "grad_norm": 4.257500171661377,
+ "learning_rate": 1.5201900237529693e-05,
+ "loss": 0.5245,
+ "step": 680
+ },
+ {
+ "epoch": 5.1068376068376065,
+ "grad_norm": 11.950453758239746,
+ "learning_rate": 1.4608076009501186e-05,
+ "loss": 0.3506,
+ "step": 690
+ },
+ {
+ "epoch": 5.117521367521367,
+ "grad_norm": 15.27865219116211,
+ "learning_rate": 1.4014251781472683e-05,
+ "loss": 0.4812,
+ "step": 700
+ },
+ {
+ "epoch": 5.1260683760683765,
+ "eval_accuracy": 0.5490196078431373,
+ "eval_loss": 1.2694668769836426,
+ "eval_runtime": 14.7973,
+ "eval_samples_per_second": 13.786,
+ "eval_steps_per_second": 1.757,
+ "step": 708
+ },
+ {
+ "epoch": 6.002136752136752,
+ "grad_norm": 3.3596088886260986,
+ "learning_rate": 1.3420427553444182e-05,
+ "loss": 0.3931,
+ "step": 710
+ },
+ {
+ "epoch": 6.012820512820513,
+ "grad_norm": 13.172847747802734,
+ "learning_rate": 1.2826603325415679e-05,
+ "loss": 0.2829,
+ "step": 720
+ },
+ {
+ "epoch": 6.023504273504273,
+ "grad_norm": 4.362306594848633,
+ "learning_rate": 1.2232779097387174e-05,
+ "loss": 0.2584,
+ "step": 730
+ },
+ {
+ "epoch": 6.034188034188034,
+ "grad_norm": 10.034035682678223,
+ "learning_rate": 1.163895486935867e-05,
+ "loss": 0.4665,
+ "step": 740
+ },
+ {
+ "epoch": 6.044871794871795,
+ "grad_norm": 6.337015151977539,
+ "learning_rate": 1.1045130641330167e-05,
+ "loss": 0.4468,
+ "step": 750
+ },
+ {
+ "epoch": 6.055555555555555,
+ "grad_norm": 8.211027145385742,
+ "learning_rate": 1.0451306413301664e-05,
+ "loss": 0.2763,
+ "step": 760
+ },
+ {
+ "epoch": 6.066239316239316,
+ "grad_norm": 10.967971801757812,
+ "learning_rate": 9.857482185273159e-06,
+ "loss": 0.4814,
+ "step": 770
+ },
+ {
+ "epoch": 6.076923076923077,
+ "grad_norm": 4.332571506500244,
+ "learning_rate": 9.263657957244656e-06,
+ "loss": 0.3561,
+ "step": 780
+ },
+ {
+ "epoch": 6.087606837606837,
+ "grad_norm": 21.54241180419922,
+ "learning_rate": 8.669833729216153e-06,
+ "loss": 0.2402,
+ "step": 790
+ },
+ {
+ "epoch": 6.098290598290598,
+ "grad_norm": 9.541482925415039,
+ "learning_rate": 8.07600950118765e-06,
+ "loss": 0.4022,
+ "step": 800
+ },
+ {
+ "epoch": 6.108974358974359,
+ "grad_norm": 12.201172828674316,
+ "learning_rate": 7.482185273159145e-06,
+ "loss": 0.3603,
+ "step": 810
+ },
+ {
+ "epoch": 6.119658119658119,
+ "grad_norm": 2.7810683250427246,
+ "learning_rate": 6.888361045130641e-06,
+ "loss": 0.4631,
+ "step": 820
+ },
+ {
+ "epoch": 6.1260683760683765,
+ "eval_accuracy": 0.6078431372549019,
+ "eval_loss": 1.209458351135254,
+ "eval_runtime": 16.1617,
+ "eval_samples_per_second": 12.622,
+ "eval_steps_per_second": 1.609,
+ "step": 826
+ },
+ {
+ "epoch": 7.004273504273504,
+ "grad_norm": 9.450981140136719,
+ "learning_rate": 6.294536817102138e-06,
+ "loss": 0.4533,
+ "step": 830
+ },
+ {
+ "epoch": 7.014957264957265,
+ "grad_norm": 8.65283489227295,
+ "learning_rate": 5.700712589073634e-06,
+ "loss": 0.2189,
+ "step": 840
+ },
+ {
+ "epoch": 7.0256410256410255,
+ "grad_norm": 2.8259940147399902,
+ "learning_rate": 5.1068883610451305e-06,
+ "loss": 0.266,
+ "step": 850
+ },
+ {
+ "epoch": 7.036324786324786,
+ "grad_norm": 11.976675033569336,
+ "learning_rate": 4.513064133016627e-06,
+ "loss": 0.3194,
+ "step": 860
+ },
+ {
+ "epoch": 7.047008547008547,
+ "grad_norm": 9.489921569824219,
+ "learning_rate": 3.919239904988124e-06,
+ "loss": 0.3841,
+ "step": 870
+ },
+ {
+ "epoch": 7.0576923076923075,
+ "grad_norm": 5.314388751983643,
+ "learning_rate": 3.3254156769596202e-06,
+ "loss": 0.2449,
+ "step": 880
+ },
+ {
+ "epoch": 7.068376068376068,
+ "grad_norm": 20.23029327392578,
+ "learning_rate": 2.7315914489311168e-06,
+ "loss": 0.314,
+ "step": 890
+ },
+ {
+ "epoch": 7.079059829059829,
+ "grad_norm": 10.66151237487793,
+ "learning_rate": 2.137767220902613e-06,
+ "loss": 0.254,
+ "step": 900
+ },
+ {
+ "epoch": 7.089743589743589,
+ "grad_norm": 5.35306978225708,
+ "learning_rate": 1.5439429928741092e-06,
+ "loss": 0.4233,
+ "step": 910
+ },
+ {
+ "epoch": 7.10042735042735,
+ "grad_norm": 6.157220363616943,
+ "learning_rate": 9.501187648456058e-07,
+ "loss": 0.343,
+ "step": 920
+ },
+ {
+ "epoch": 7.111111111111111,
+ "grad_norm": 12.305887222290039,
+ "learning_rate": 3.5629453681710215e-07,
+ "loss": 0.2311,
+ "step": 930
+ },
+ {
+ "epoch": 7.117521367521367,
+ "eval_accuracy": 0.5980392156862745,
+ "eval_loss": 1.1960477828979492,
+ "eval_runtime": 14.4874,
+ "eval_samples_per_second": 14.081,
+ "eval_steps_per_second": 1.795,
+ "step": 936
+ },
+ {
+ "epoch": 7.117521367521367,
+ "step": 936,
+ "total_flos": 9.31358507420772e+18,
+ "train_loss": 0.996516230269375,
+ "train_runtime": 1255.4154,
+ "train_samples_per_second": 5.965,
+ "train_steps_per_second": 0.746
+ },
+ {
+ "epoch": 7.117521367521367,
+ "eval_accuracy": 0.5812807881773399,
+ "eval_loss": 1.3488672971725464,
+ "eval_runtime": 28.5732,
+ "eval_samples_per_second": 7.105,
+ "eval_steps_per_second": 0.91,
+ "step": 936
+ },
+ {
+ "epoch": 7.117521367521367,
+ "eval_accuracy": 0.5812807881773399,
+ "eval_loss": 1.3488672971725464,
+ "eval_runtime": 14.5651,
+ "eval_samples_per_second": 13.937,
+ "eval_steps_per_second": 1.785,
+ "step": 936
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 936,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 9.31358507420772e+18,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
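For convenience, a small sketch (not part of the original commit) of how the trainer_state.json added above can be inspected: it reads the best checkpoint recorded by the Trainer and extracts the eval-accuracy curve from log_history.

```python
# Parse trainer_state.json and pull out the evaluation trajectory.
import json
from pathlib import Path

state = json.loads(Path("trainer_state.json").read_text())

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_accuracy:", state["best_metric"])

# log_history mixes training-loss entries and evaluation entries;
# the evaluation entries are the ones carrying an "eval_accuracy" key.
eval_points = [
    (entry["step"], entry["eval_accuracy"])
    for entry in state["log_history"]
    if "eval_accuracy" in entry
]
for step, acc in eval_points:
    print(f"step {step:4d}: eval_accuracy = {acc:.3f}")
```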