dimasik2987 committed (verified)
Commit ef29a4a · 1 Parent(s): 826f63d

Training in progress, step 24, checkpoint

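The files below are the ones the Hugging Face `Trainer` rewrites on each checkpoint save: the adapter weights (`adapter_model.safetensors`, which suggests a PEFT/LoRA run), the optimizer and learning-rate-scheduler state (`optimizer.pt`, `scheduler.pt`), the RNG state (`rng_state.pth`), and the training log (`trainer_state.json`). As a minimal, hedged sketch of loading the saved adapter, assuming the checkpoint directory also holds an `adapter_config.json` (unchanged, so not shown in this diff) and assuming a base model id that is not recorded here:

```python
# Minimal sketch, not the repository's own code. "base-model-id" and the local
# path "last-checkpoint" are assumptions; only the file layout comes from this commit.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-id")  # assumed base model
model = PeftModel.from_pretrained(base, "last-checkpoint")    # reads adapter_model.safetensors

# To continue training instead of just loading weights, the same directory can be
# passed to an identically configured Trainer via
# trainer.train(resume_from_checkpoint="last-checkpoint"), which also restores
# optimizer.pt, scheduler.pt, rng_state.pth and trainer_state.json.
```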
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f23450359fdb07ebcd0caa0b3b757cc7ebb632d49851262494267f85df9b75f9
+ oid sha256:da90fa0e16e41fdfe764b4234dbea16e19ff591391333eb9adcdc7f6278266c7
  size 200068512
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:968db29b6bed2f48495b5e4b3842813ade8836d75e6e318a78f20b3c4b192404
+ oid sha256:dbe28944b0e69e5f100787ef18ce315e0ecd9f75ff6d8c81f53c778898769765
  size 400361770
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a6f5fe99d81e7517b5957d450f6702d8ebbd0807e19095a89194ae9f341f65dc
+ oid sha256:6653bc060345fb346135d580a2bf3cff0c36f2c54b348b5459115515481320b6
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
+ oid sha256:470670603e8fdc5330cdb9a9152c4fd9c3d8c5a74dd26bffbbb0d869d097eafa
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0053839423918164075,
+  "epoch": 0.006460730870179689,
   "eval_steps": 4,
-  "global_step": 20,
+  "global_step": 24,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -195,6 +195,42 @@
       "eval_samples_per_second": 8.369,
       "eval_steps_per_second": 4.187,
       "step": 20
+    },
+    {
+      "epoch": 0.005653139511407228,
+      "grad_norm": 2.7933688163757324,
+      "learning_rate": 0.00016494480483301836,
+      "loss": 3.8037,
+      "step": 21
+    },
+    {
+      "epoch": 0.005922336630998048,
+      "grad_norm": 2.6206228733062744,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 4.1224,
+      "step": 22
+    },
+    {
+      "epoch": 0.006191533750588869,
+      "grad_norm": 2.956394910812378,
+      "learning_rate": 0.0001522498564715949,
+      "loss": 3.8059,
+      "step": 23
+    },
+    {
+      "epoch": 0.006460730870179689,
+      "grad_norm": 2.445277690887451,
+      "learning_rate": 0.00014539904997395468,
+      "loss": 3.8613,
+      "step": 24
+    },
+    {
+      "epoch": 0.006460730870179689,
+      "eval_loss": 3.9950814247131348,
+      "eval_runtime": 186.8191,
+      "eval_samples_per_second": 8.377,
+      "eval_steps_per_second": 4.191,
+      "step": 24
     }
   ],
   "logging_steps": 1,
@@ -214,7 +250,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.533788723085312e+16,
+  "total_flos": 1.8405464677023744e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null