Thoma committed · verified
Commit f3b7562 · 1 Parent(s): fb34822

Training in progress, step 50, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0857a937c68c840dd24f04556915949db53732da4a4a835f9521c7a20ade84b9
+oid sha256:e8be7e33790e574c5379a5c9d64d3394c61f0bc03461887c111556efd00e8f18
 size 64219860
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:354fcbfab35beb320321fc72abe28e2ba17d63b820a2c10b34cfe337af85be52
+oid sha256:cd9f9fa4f12eb656180c05ab6d0e11868fa1b817f9a6c422f3c0302601735f4e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ccc7e73cc5879da996ace4c3a10d9efe08a100111973e801d61997747e95e982
+oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
 size 1064
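
Each of the three checkpoint binaries above is stored as a Git LFS pointer, so the diff only swaps the oid sha256 line; the version and size lines are unchanged. A minimal sketch of how one might verify a locally downloaded blob against the new pointer oid (the path and expected hash are copied from the optimizer.pt diff above; nothing in this sketch is part of the commit itself):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in chunks so a large checkpoint blob need not fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected value taken from the new optimizer.pt pointer; the local path is illustrative.
expected = "e8be7e33790e574c5379a5c9d64d3394c61f0bc03461887c111556efd00e8f18"
print(sha256_of("last-checkpoint/optimizer.pt") == expected)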
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.003987322359676924,
+  "epoch": 0.0051119517431755445,
   "eval_steps": 13,
-  "global_step": 39,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -312,6 +312,83 @@
       "eval_samples_per_second": 21.144,
       "eval_steps_per_second": 10.575,
       "step": 39
+    },
+    {
+      "epoch": 0.004089561394540435,
+      "grad_norm": NaN,
+      "learning_rate": 2.9289321881345254e-05,
+      "loss": 0.0,
+      "step": 40
+    },
+    {
+      "epoch": 0.004191800429403946,
+      "grad_norm": NaN,
+      "learning_rate": 2.3959403439996907e-05,
+      "loss": 0.0,
+      "step": 41
+    },
+    {
+      "epoch": 0.004294039464267457,
+      "grad_norm": NaN,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.0,
+      "step": 42
+    },
+    {
+      "epoch": 0.004396278499130968,
+      "grad_norm": NaN,
+      "learning_rate": 1.4735983564590783e-05,
+      "loss": 0.0,
+      "step": 43
+    },
+    {
+      "epoch": 0.004498517533994479,
+      "grad_norm": NaN,
+      "learning_rate": 1.0899347581163221e-05,
+      "loss": 0.0,
+      "step": 44
+    },
+    {
+      "epoch": 0.00460075656885799,
+      "grad_norm": NaN,
+      "learning_rate": 7.612046748871327e-06,
+      "loss": 0.0,
+      "step": 45
+    },
+    {
+      "epoch": 0.0047029956037215005,
+      "grad_norm": NaN,
+      "learning_rate": 4.8943483704846475e-06,
+      "loss": 0.0,
+      "step": 46
+    },
+    {
+      "epoch": 0.0048052346385850115,
+      "grad_norm": NaN,
+      "learning_rate": 2.7630079602323442e-06,
+      "loss": 0.0,
+      "step": 47
+    },
+    {
+      "epoch": 0.0049074736734485225,
+      "grad_norm": NaN,
+      "learning_rate": 1.231165940486234e-06,
+      "loss": 0.0,
+      "step": 48
+    },
+    {
+      "epoch": 0.0050097127083120335,
+      "grad_norm": NaN,
+      "learning_rate": 3.0826662668720364e-07,
+      "loss": 0.0,
+      "step": 49
+    },
+    {
+      "epoch": 0.0051119517431755445,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -326,12 +403,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.23758343880704e+16,
+  "total_flos": 1.586645434368e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null