dakwi committed on
Commit
68470b4
1 Parent(s): 1233594

Training in progress, step 46884, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70c18e946f85425f6567052844e41d2867eeaebc42b9d0914dd8b574b0a90250
+oid sha256:7ad22409dd3402ca2ecb2d47072950a781c2354f8fe3126a2eb8ae11d06d6f91
 size 1212421632
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:00c962ee4d551a268cbe4209e3cc4608eec7297067ebdd0b7068487ac0e6d875
+oid sha256:34383a9541866f09afaa4a919db6586fc17b5a80e50adb155191dfb6ef63172f
 size 2425026746
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:131f01080b97a6c52d218e6520880b7ceb15e4aa7848381329475ad933095f08
+oid sha256:15a17b6d7baa40034984255c4a98182f6d1a6527dbdc4c020a846d422cc34862
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:976b034a9b39ad83d57162167aaac3de3fd744a06cc8fc9da1e877c04b7770e1
+oid sha256:eeafad747f6fc6398eb5f68e05cf9290eccdae3f01277834a013bd717a9799cb
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.8574004173278809,
   "best_model_checkpoint": "chessgpt2-medium-l/checkpoint-46000",
-  "epoch": 2.9434348605067826,
+  "epoch": 3.0,
   "eval_steps": 2000,
-  "global_step": 46000,
+  "global_step": 46884,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -375,12 +375,12 @@
   "should_evaluate": false,
   "should_log": false,
   "should_save": true,
-  "should_training_stop": false
+  "should_training_stop": true
   },
   "attributes": {}
   }
   },
-  "total_flos": 1.811390306196652e+18,
+  "total_flos": 1.8462142604897157e+18,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null