dada22231 committed
Commit 082ebef · verified · 1 Parent(s): fcc2a36

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9ac8c5d64d28bb1e8c27c52b4c9244a4b619f8114363c18f4403ae67934c881f
+ oid sha256:932ae829166f8602d43570391428e34cf30578b6a6647c202d4bf116784db684
  size 335604696
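
Note: only the LFS object id changes here; the adapter weights were rewritten in place with the same byte size (335604696). If adapter_model.safetensors is a PEFT/LoRA adapter saved by the Trainer (a plausible reading of the checkpoint layout, not something the diff states), the checkpoint directory could be loaded roughly as in the sketch below, where the base-model name and model class are placeholders:

# Minimal sketch, not the repo's own code: load the adapter from this checkpoint dir.
# "base-model-name" and AutoModelForCausalLM are hypothetical placeholders; the diff
# does not identify the base model or the task head.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-name")
model = PeftModel.from_pretrained(base, "last-checkpoint")  # reads adapter_model.safetensors
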
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f2489d862c27b7e04716b082654059cee9d81be5bc99ff62d2f6d699d5d74a13
+ oid sha256:1722e3a32255624620b0599e9dbb3d6d62da761f4e1ca1ec8c97c4df69bcb83c
  size 671466706
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8ff7c17184a2a63ce890f39f18b74b58a2c3e8f57792117daa2c4fdb0f733653
+ oid sha256:4211050507b57dc2ac7b7cafe32e1b2f7aae53225dcddfb60e261ffaaffe9d51
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a053b7de268396c428a47bf54fac9f28dadfa78484e33dbec2baebe336d6f433
+ oid sha256:8926f288f20d72cd9a6d1763235af942d4f86c92a42f2873d652d1c6769a3afc
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c35a17d6e6faf28c2f73548ca0c57714999731e978643c0b1d98bbb5db6a2f79
+ oid sha256:d43f47c0228ac0277a685add1666dcff595a7067dbb4424f831582c78bcfd39e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4336eb2c776ad1c2f74fb07721aeed5d99ea1b3a141f9dd6f619fc342d37b4c1
+ oid sha256:53b458359efd7953f1953937f4e6de1d77ada83b566d6c409c3085f210e67f88
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d271cdb95f63cd655315f063ca2e25c78dc5ae4275523c5d4f80f367586b3351
+ oid sha256:5607f6de446164d9d9adb8b91c44cec55b14aa391e24ba5637c08b834eedda2a
  size 1064
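
Each pointer file above stores only the SHA-256 digest ("oid") and byte size of the real object kept in LFS storage. A quick local integrity check of a downloaded file against its pointer, as a minimal sketch using only the Python standard library (the path and expected digest are taken from the adapter diff above):

# Minimal sketch: verify a downloaded file against the "oid sha256:..." of its LFS pointer.
import hashlib

def sha256_of(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "932ae829166f8602d43570391428e34cf30578b6a6647c202d4bf116784db684"  # new adapter oid
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)
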
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.6901664137840271,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 0.17293558149589278,
+ "epoch": 0.34587116299178555,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,189 @@
  "eval_samples_per_second": 10.578,
  "eval_steps_per_second": 2.75,
  "step": 25
+ },
+ {
+ "epoch": 0.1798530047557285,
+ "grad_norm": 20.78670310974121,
+ "learning_rate": 5.500000000000001e-05,
+ "loss": 11.0287,
+ "step": 26
+ },
+ {
+ "epoch": 0.1867704280155642,
+ "grad_norm": 7.750968933105469,
+ "learning_rate": 5.205685918464356e-05,
+ "loss": 10.9515,
+ "step": 27
+ },
+ {
+ "epoch": 0.19368785127539992,
+ "grad_norm": 19.583345413208008,
+ "learning_rate": 4.912632135009769e-05,
+ "loss": 11.2867,
+ "step": 28
+ },
+ {
+ "epoch": 0.20060527453523563,
+ "grad_norm": 8.281968116760254,
+ "learning_rate": 4.6220935509274235e-05,
+ "loss": 10.9828,
+ "step": 29
+ },
+ {
+ "epoch": 0.20752269779507135,
+ "grad_norm": 12.24773120880127,
+ "learning_rate": 4.3353142970386564e-05,
+ "loss": 11.1671,
+ "step": 30
+ },
+ {
+ "epoch": 0.21444012105490703,
+ "grad_norm": 17.732666015625,
+ "learning_rate": 4.053522406135775e-05,
+ "loss": 11.0593,
+ "step": 31
+ },
+ {
+ "epoch": 0.22135754431474275,
+ "grad_norm": 18.043264389038086,
+ "learning_rate": 3.777924554357096e-05,
+ "loss": 11.2308,
+ "step": 32
+ },
+ {
+ "epoch": 0.22827496757457846,
+ "grad_norm": 18.405481338500977,
+ "learning_rate": 3.509700894014496e-05,
+ "loss": 11.2062,
+ "step": 33
+ },
+ {
+ "epoch": 0.23519239083441418,
+ "grad_norm": 21.06205177307129,
+ "learning_rate": 3.250000000000001e-05,
+ "loss": 11.0728,
+ "step": 34
+ },
+ {
+ "epoch": 0.2421098140942499,
+ "grad_norm": 13.367883682250977,
+ "learning_rate": 2.9999339514117912e-05,
+ "loss": 11.2255,
+ "step": 35
+ },
+ {
+ "epoch": 0.2490272373540856,
+ "grad_norm": 12.570308685302734,
+ "learning_rate": 2.760573569460757e-05,
+ "loss": 11.0571,
+ "step": 36
+ },
+ {
+ "epoch": 0.2559446606139213,
+ "grad_norm": 8.503302574157715,
+ "learning_rate": 2.53294383204969e-05,
+ "loss": 11.0228,
+ "step": 37
+ },
+ {
+ "epoch": 0.26286208387375704,
+ "grad_norm": 8.865936279296875,
+ "learning_rate": 2.3180194846605367e-05,
+ "loss": 10.9888,
+ "step": 38
+ },
+ {
+ "epoch": 0.2697795071335927,
+ "grad_norm": 5.083145618438721,
+ "learning_rate": 2.1167208663446025e-05,
+ "loss": 11.0964,
+ "step": 39
+ },
+ {
+ "epoch": 0.27669693039342846,
+ "grad_norm": 8.988863945007324,
+ "learning_rate": 1.9299099686894423e-05,
+ "loss": 11.1576,
+ "step": 40
+ },
+ {
+ "epoch": 0.28361435365326415,
+ "grad_norm": 10.590516090393066,
+ "learning_rate": 1.758386744638546e-05,
+ "loss": 11.1371,
+ "step": 41
+ },
+ {
+ "epoch": 0.2905317769130999,
+ "grad_norm": 5.303899765014648,
+ "learning_rate": 1.602885682970026e-05,
+ "loss": 11.0594,
+ "step": 42
+ },
+ {
+ "epoch": 0.2974492001729356,
+ "grad_norm": 11.25604248046875,
+ "learning_rate": 1.464072663102903e-05,
+ "loss": 10.9194,
+ "step": 43
+ },
+ {
+ "epoch": 0.30436662343277127,
+ "grad_norm": 6.66317081451416,
+ "learning_rate": 1.3425421036992098e-05,
+ "loss": 10.9729,
+ "step": 44
+ },
+ {
+ "epoch": 0.311284046692607,
+ "grad_norm": 5.8001790046691895,
+ "learning_rate": 1.2388144172720251e-05,
+ "loss": 10.8979,
+ "step": 45
+ },
+ {
+ "epoch": 0.3182014699524427,
+ "grad_norm": 11.265183448791504,
+ "learning_rate": 1.1533337816991932e-05,
+ "loss": 11.2311,
+ "step": 46
+ },
+ {
+ "epoch": 0.32511889321227844,
+ "grad_norm": 7.881767272949219,
+ "learning_rate": 1.0864662381854632e-05,
+ "loss": 11.0845,
+ "step": 47
+ },
+ {
+ "epoch": 0.3320363164721141,
+ "grad_norm": 16.301807403564453,
+ "learning_rate": 1.0384981238178534e-05,
+ "loss": 11.2441,
+ "step": 48
+ },
+ {
+ "epoch": 0.33895373973194987,
+ "grad_norm": 10.308152198791504,
+ "learning_rate": 1.0096348454262845e-05,
+ "loss": 10.9921,
+ "step": 49
+ },
+ {
+ "epoch": 0.34587116299178555,
+ "grad_norm": 7.8041486740112305,
+ "learning_rate": 1e-05,
+ "loss": 10.9746,
+ "step": 50
+ },
+ {
+ "epoch": 0.34587116299178555,
+ "eval_loss": 0.6932806372642517,
+ "eval_runtime": 4.7426,
+ "eval_samples_per_second": 10.543,
+ "eval_steps_per_second": 2.741,
+ "step": 50
  }
  ],
  "logging_steps": 1,
@@ -212,7 +395,7 @@
  "early_stopping_threshold": 0.0
  },
  "attributes": {
- "early_stopping_patience_counter": 0
+ "early_stopping_patience_counter": 1
  }
  },
  "TrainerControl": {
@@ -221,12 +404,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 2.829010669142016e+17,
+ "total_flos": 5.658021338284032e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null