nttx committed
Commit 3247f90 · verified · 1 parent: aeaf42e

Training in progress, step 1500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1472fd3175b7ecbc2401975708851b9ad53adc2f625f3fff177700cec5b00361
+oid sha256:20265465f47ed40420d027859aeafe6a4a6e4e3e9148bc256769e270576f8336
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:719d7757fcb51c761a13bbcccd311357f67f980af9041d6740cf235463842bef
+oid sha256:e0cd321e569cee7371d3bea7258f212c5e15eca62240341d944ef83ce3c8646e
 size 43123028
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c0a65ddb80c6261e59ab21c6d6bd531c9de851ba7b801b665eb686802ed7f21
+oid sha256:75105a7479ba58f1e99c9a19943e91bb9dadcb2ed4c28f87642caecf60cf2ef3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cb61e48d266c471cccd477f025b3f7a3c0435861637d5fe39610779eedceb14
+oid sha256:fe4e625cf5525cf235a8c7e4c6984ba05d1b5436f9e969799dcec0119a3a6c2a
 size 1064
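
The four files above are Git LFS pointer stubs rather than the binaries themselves: each records the LFS spec version, the SHA-256 of the stored blob, and its size in bytes. As a rough illustrative sketch (not part of this commit; the paths are placeholders), a downloaded blob can be checked against its pointer like this:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse the three-line pointer stub: "version ...", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1)
                  for line in Path(pointer_path).read_text().splitlines() if line)
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    # Recompute the hash and size of the local blob and compare against the pointer.
    blob = Path(blob_path).read_bytes()
    return hashlib.sha256(blob).hexdigest() == expected_oid and len(blob) == expected_size

# Placeholder usage -- point these at a real pointer stub and its downloaded blob:
# verify_lfs_pointer("adapter_model.safetensors.ptr", "adapter_model.safetensors")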
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-"best_metric": 1.3632562160491943,
-"best_model_checkpoint": "miner_id_24/checkpoint-1200",
-"epoch": 0.25929827404586336,
+"best_metric": 1.3561660051345825,
+"best_model_checkpoint": "miner_id_24/checkpoint-1500",
+"epoch": 0.3241228425573292,
 "eval_steps": 300,
-"global_step": 1200,
+"global_step": 1500,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -215,6 +215,56 @@
 "eval_samples_per_second": 14.662,
 "eval_steps_per_second": 1.833,
 "step": 1200
+},
+{
+"epoch": 0.2701023687977744,
+"grad_norm": 0.4282647669315338,
+"learning_rate": 1.5327580077171587e-05,
+"loss": 1.3584,
+"step": 1250
+},
+{
+"epoch": 0.28090646354968535,
+"grad_norm": 0.4491986930370331,
+"learning_rate": 9.903113209758096e-06,
+"loss": 1.3788,
+"step": 1300
+},
+{
+"epoch": 0.2917105583015963,
+"grad_norm": 0.7209851145744324,
+"learning_rate": 5.611666969163243e-06,
+"loss": 1.3648,
+"step": 1350
+},
+{
+"epoch": 0.3025146530535073,
+"grad_norm": 0.4760664701461792,
+"learning_rate": 2.5072087818176382e-06,
+"loss": 1.3583,
+"step": 1400
+},
+{
+"epoch": 0.31331874780541824,
+"grad_norm": 0.49940499663352966,
+"learning_rate": 6.287790106757396e-07,
+"loss": 1.3675,
+"step": 1450
+},
+{
+"epoch": 0.3241228425573292,
+"grad_norm": 0.4709605276584625,
+"learning_rate": 0.0,
+"loss": 1.3532,
+"step": 1500
+},
+{
+"epoch": 0.3241228425573292,
+"eval_loss": 1.3561660051345825,
+"eval_runtime": 1061.9702,
+"eval_samples_per_second": 14.679,
+"eval_steps_per_second": 1.835,
+"step": 1500
 }
 ],
 "logging_steps": 50,
@@ -238,12 +288,12 @@
 "should_evaluate": false,
 "should_log": false,
 "should_save": true,
-"should_training_stop": false
+"should_training_stop": true
 },
 "attributes": {}
 }
 },
-"total_flos": 3.2790924555379016e+18,
+"total_flos": 4.100668894564319e+18,
 "train_batch_size": 8,
 "trial_name": null,
 "trial_params": null