dada22231 committed
Commit dd69004 · verified · 1 Parent(s): 87bd829

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7aff9f85c78bb1313f336acceb889e423988a7f7072a130b0058ca32fe3d8b7e
+ oid sha256:033a56672257d2be9832c051692dceff2c6d86f9f750ee15caf028d3fc5c8e46
  size 239536272
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:327f8ff05f44a5c27b46e17d2f7c8d50f0601064dfc227bf796e76279cbbd110
+ oid sha256:bceccf0f4e7a110d3c1a475939ca486c219f9b25efa001b83a99effe160cb078
  size 479362682
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ec215d0987b1599ddea0e5ceb4db8ca5f9c0d15cd4e11b1960d198226c2ce7f9
+ oid sha256:d07d049760a64bae661bec26d42023972279de934b92a5245705ca10929c862f
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ffa785c34328f04c63665117ecc37ec357550e2261e2b06954d6932effb26455
+ oid sha256:119dcdd5e62603ae96a3f194bba585af70ccf458e1bfcca04489c86625cf5102
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:253aba309d4a808b07a88b5a50d70a7aec3382e480ffe9b5bf2dd59edcf1920d
+ oid sha256:0df9bf94b7676e5f66f77bb275750b3b7ab89a563abffde5ac332e98cf80df8e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:22dfeb079f604599def1042937a6a9bcc4b5d9f374df0bd33600df6a6a702f06
+ oid sha256:a01e50e322fec9d738d53421bb50e8bc3b5daabb4124dc74f2e4f5da50ecccdf
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d271cdb95f63cd655315f063ca2e25c78dc5ae4275523c5d4f80f367586b3351
+ oid sha256:5607f6de446164d9d9adb8b91c44cec55b14aa391e24ba5637c08b834eedda2a
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.452702283859253,
- "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 0.47961630695443647,
+ "best_metric": 1.4119333028793335,
+ "best_model_checkpoint": "miner_id_24/checkpoint-50",
+ "epoch": 0.9592326139088729,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,189 @@
  "eval_samples_per_second": 16.283,
  "eval_steps_per_second": 4.234,
  "step": 25
+ },
+ {
+ "epoch": 0.4988009592326139,
+ "grad_norm": 0.3837337791919708,
+ "learning_rate": 5.500000000000001e-05,
+ "loss": 1.4694,
+ "step": 26
+ },
+ {
+ "epoch": 0.5179856115107914,
+ "grad_norm": 0.31981226801872253,
+ "learning_rate": 5.205685918464356e-05,
+ "loss": 1.4007,
+ "step": 27
+ },
+ {
+ "epoch": 0.5371702637889688,
+ "grad_norm": 0.29301542043685913,
+ "learning_rate": 4.912632135009769e-05,
+ "loss": 1.425,
+ "step": 28
+ },
+ {
+ "epoch": 0.5563549160671463,
+ "grad_norm": 0.27098387479782104,
+ "learning_rate": 4.6220935509274235e-05,
+ "loss": 1.3783,
+ "step": 29
+ },
+ {
+ "epoch": 0.5755395683453237,
+ "grad_norm": 0.2766132652759552,
+ "learning_rate": 4.3353142970386564e-05,
+ "loss": 1.3708,
+ "step": 30
+ },
+ {
+ "epoch": 0.5947242206235012,
+ "grad_norm": 0.2871565520763397,
+ "learning_rate": 4.053522406135775e-05,
+ "loss": 1.3912,
+ "step": 31
+ },
+ {
+ "epoch": 0.6139088729016786,
+ "grad_norm": 0.2908610701560974,
+ "learning_rate": 3.777924554357096e-05,
+ "loss": 1.3315,
+ "step": 32
+ },
+ {
+ "epoch": 0.6330935251798561,
+ "grad_norm": 0.332882285118103,
+ "learning_rate": 3.509700894014496e-05,
+ "loss": 1.2766,
+ "step": 33
+ },
+ {
+ "epoch": 0.6522781774580336,
+ "grad_norm": 0.2988963723182678,
+ "learning_rate": 3.250000000000001e-05,
+ "loss": 1.3176,
+ "step": 34
+ },
+ {
+ "epoch": 0.6714628297362111,
+ "grad_norm": 0.32519230246543884,
+ "learning_rate": 2.9999339514117912e-05,
+ "loss": 1.3037,
+ "step": 35
+ },
+ {
+ "epoch": 0.6906474820143885,
+ "grad_norm": 0.33219724893569946,
+ "learning_rate": 2.760573569460757e-05,
+ "loss": 1.3243,
+ "step": 36
+ },
+ {
+ "epoch": 0.709832134292566,
+ "grad_norm": 0.36349430680274963,
+ "learning_rate": 2.53294383204969e-05,
+ "loss": 1.3258,
+ "step": 37
+ },
+ {
+ "epoch": 0.7290167865707434,
+ "grad_norm": 0.3923780918121338,
+ "learning_rate": 2.3180194846605367e-05,
+ "loss": 1.4683,
+ "step": 38
+ },
+ {
+ "epoch": 0.7482014388489209,
+ "grad_norm": 0.45209190249443054,
+ "learning_rate": 2.1167208663446025e-05,
+ "loss": 1.4447,
+ "step": 39
+ },
+ {
+ "epoch": 0.7673860911270983,
+ "grad_norm": 0.44667184352874756,
+ "learning_rate": 1.9299099686894423e-05,
+ "loss": 1.3867,
+ "step": 40
+ },
+ {
+ "epoch": 0.7865707434052758,
+ "grad_norm": 0.4654199182987213,
+ "learning_rate": 1.758386744638546e-05,
+ "loss": 1.3885,
+ "step": 41
+ },
+ {
+ "epoch": 0.8057553956834532,
+ "grad_norm": 0.4635009169578552,
+ "learning_rate": 1.602885682970026e-05,
+ "loss": 1.3306,
+ "step": 42
+ },
+ {
+ "epoch": 0.8249400479616307,
+ "grad_norm": 0.3623160719871521,
+ "learning_rate": 1.464072663102903e-05,
+ "loss": 1.3448,
+ "step": 43
+ },
+ {
+ "epoch": 0.8441247002398081,
+ "grad_norm": 0.2907363176345825,
+ "learning_rate": 1.3425421036992098e-05,
+ "loss": 1.2858,
+ "step": 44
+ },
+ {
+ "epoch": 0.8633093525179856,
+ "grad_norm": 0.2541451156139374,
+ "learning_rate": 1.2388144172720251e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.882494004796163,
+ "grad_norm": 0.25829795002937317,
+ "learning_rate": 1.1533337816991932e-05,
+ "loss": 1.3359,
+ "step": 46
+ },
+ {
+ "epoch": 0.9016786570743405,
+ "grad_norm": 0.27368149161338806,
+ "learning_rate": 1.0864662381854632e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.920863309352518,
+ "grad_norm": 0.3009626567363739,
+ "learning_rate": 1.0384981238178534e-05,
+ "loss": 1.3405,
+ "step": 48
+ },
+ {
+ "epoch": 0.9400479616306955,
+ "grad_norm": 0.34668755531311035,
+ "learning_rate": 1.0096348454262845e-05,
+ "loss": 1.3204,
+ "step": 49
+ },
+ {
+ "epoch": 0.9592326139088729,
+ "grad_norm": 0.4955449104309082,
+ "learning_rate": 1e-05,
+ "loss": 1.3941,
+ "step": 50
+ },
+ {
+ "epoch": 0.9592326139088729,
+ "eval_loss": 1.4119333028793335,
+ "eval_runtime": 2.9994,
+ "eval_samples_per_second": 16.67,
+ "eval_steps_per_second": 4.334,
+ "step": 50
  }
  ],
  "logging_steps": 1,
@@ -221,12 +404,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 1.114626154561536e+17,
+ "total_flos": 2.229252309123072e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null