dimasik2987 committed
Commit 4678aeb · verified · 1 Parent(s): 1288518

Training in progress, step 32, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12643d73306d913bb59b9fef8ebce83ff830057d2ff5d47969f3e4d61fdebd0c
+oid sha256:20c8eb8325ee56887bcac39c7154198c4c29da0906f5c3ea537f9c33b12522a7
 size 200068512
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e3f5b4eec3e66d80a2fd8c3c6b19d5807558d664d02d881def3beb6b46979ab
+oid sha256:c50dbc656fd8436361ad2967fe1a74152d58ec2100319552b81b9e758a442b69
 size 400361770
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c5c166beb56023c475d0a1e4488772a1479f599ca29e77e7106a93caafb4ed00
+oid sha256:fd7a7ac14760662453a208d3a2065875c8aad63732e589bcd3c513e8d0acfc26
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff994fffd2fb6fe21545e6fbc55baa2a1474438a89b2d40605678f7de701427c
+oid sha256:06c69db2db6de56f38ba12b474a491d20087e27dc2893a95d6ac7716476ca645
 size 1064
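
Each of the checkpoint tensors above is tracked with Git LFS, so the repository stores only a small pointer file (spec version, SHA-256 oid, byte size); the diffs show the oid changing while the size stays fixed. Below is a minimal Python sketch of checking a downloaded object against its pointer, assuming a hypothetical local path ("scheduler.pt") and using the oid/size from the updated scheduler.pt pointer above:

import hashlib
import os

def verify_lfs_object(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its Git LFS pointer."""
    if os.path.getsize(local_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        # Hash the file in 1 MiB chunks to avoid loading it fully into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values copied from the updated last-checkpoint/scheduler.pt pointer above;
# "scheduler.pt" is a hypothetical local download path.
print(verify_lfs_object(
    "scheduler.pt",
    "06c69db2db6de56f38ba12b474a491d20087e27dc2893a95d6ac7716476ca645",
    1064,
))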
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.007537519348542971,
+  "epoch": 0.008614307826906252,
   "eval_steps": 4,
-  "global_step": 28,
+  "global_step": 32,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -267,6 +267,42 @@
       "eval_samples_per_second": 8.363,
       "eval_steps_per_second": 4.184,
       "step": 28
+    },
+    {
+      "epoch": 0.007806716468133791,
+      "grad_norm": 2.46905255317688,
+      "learning_rate": 0.0001078459095727845,
+      "loss": 4.1957,
+      "step": 29
+    },
+    {
+      "epoch": 0.00807591358772461,
+      "grad_norm": 2.6972992420196533,
+      "learning_rate": 0.0001,
+      "loss": 3.7234,
+      "step": 30
+    },
+    {
+      "epoch": 0.008345110707315431,
+      "grad_norm": 2.946199655532837,
+      "learning_rate": 9.215409042721552e-05,
+      "loss": 3.8905,
+      "step": 31
+    },
+    {
+      "epoch": 0.008614307826906252,
+      "grad_norm": 2.631840944290161,
+      "learning_rate": 8.435655349597689e-05,
+      "loss": 3.8763,
+      "step": 32
+    },
+    {
+      "epoch": 0.008614307826906252,
+      "eval_loss": 3.9116568565368652,
+      "eval_runtime": 187.6034,
+      "eval_samples_per_second": 8.342,
+      "eval_steps_per_second": 4.174,
+      "step": 32
     }
   ],
   "logging_steps": 1,
@@ -286,7 +322,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.147304212319437e+16,
+  "total_flos": 2.454061956936499e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null