kinleyrabgay committed · verified
Commit a740962 · 1 Parent(s): 030e11d

Training in progress, epoch 5, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3a580f8931d3c0b73a4622991aa6cb3b40337ac5190443f4d57543128a06debd
+oid sha256:f94034709b397975548cb9fbe3a07aad5648c6a4a432299e1011979d35fd8920
 size 2460354912
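
Note: the checkpoint files in this commit are stored with Git LFS, so the repository only tracks small pointer files whose "oid" line is the SHA-256 digest of the real blob and whose "size" line is its byte count. A minimal Python sketch for checking a downloaded blob against its pointer (the local path is hypothetical; the oid and size are copied from the new model.safetensors pointer above):

import hashlib
import os

def verify_lfs_pointer(blob_path, expected_oid, expected_size):
    # Compare a downloaded blob against the oid/size recorded in its LFS pointer.
    if os.path.getsize(blob_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical local download location of this checkpoint file.
print(verify_lfs_pointer(
    "last-checkpoint/model.safetensors",
    "f94034709b397975548cb9fbe3a07aad5648c6a4a432299e1011979d35fd8920",
    2460354912,
))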
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c445f2be47afa8fe35e10ccfbab01c2115861a34c88c68d4f9a2329b3c655bf
+oid sha256:3ddb21a7614a291f781d08059c7d74bb4d34d15bbd3bbabd7058de72e6e59ba2
 size 4921023445
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c8a2de6fd3300223d2a2c6022b35cc0aae70c2ee044eca6760de0d2de20cf17
+oid sha256:5dfc59386df7407a0fc45116f54b32e769582aa411c1cba47cc07dc2aeebc682
 size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:41216328ab75de937007afb04d76156949bedb908461001a451c8991c2ba8cca
+oid sha256:061929cf33bbb1db778e3f3bba75a30feaf7aa9ae494505513aa8b545044f0b4
 size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a11a75238b320f6122b6a3754c879f05bf15124609a375c0697fe088d06519c3
+oid sha256:f82ec84100546c47a76f63e10c051f300392ef520458425efd0cbf0afe3718ce
 size 1064
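
Together, the five updated files form a complete resumable Trainer checkpoint: model weights (model.safetensors), optimizer and LR-scheduler state (optimizer.pt, scheduler.pt), the mixed-precision GradScaler (scaler.pt), and the RNG state (rng_state.pth). A rough, illustrative sketch of inspecting them locally, assuming torch and safetensors are installed and the checkpoint directory has been downloaded; this is not the training script used for this run:

import torch
from safetensors.torch import load_file

ckpt = "last-checkpoint"  # hypothetical local copy of this checkpoint directory

weights = load_file(f"{ckpt}/model.safetensors")  # dict: tensor name -> tensor
# weights_only=False because these files contain non-tensor Python objects;
# only do this for checkpoints you trust.
optimizer = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu", weights_only=False)
scaler = torch.load(f"{ckpt}/scaler.pt", map_location="cpu", weights_only=False)
rng = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu", weights_only=False)

print(len(weights), "weight tensors")
print(list(optimizer.keys()))  # typically 'state' and 'param_groups'

In practice the Hugging Face Trainer restores all of this automatically when train(resume_from_checkpoint=...) is pointed at the checkpoint directory.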
last-checkpoint/trainer_state.json CHANGED
@@ -1,10 +1,10 @@
 {
-  "best_global_step": 3750,
-  "best_metric": 59.30988278861181,
-  "best_model_checkpoint": "nllb-200-600M-dzo-eng-checkpoints/checkpoint-3750",
-  "epoch": 4.0,
+  "best_global_step": 6250,
+  "best_metric": 59.5126771383472,
+  "best_model_checkpoint": "nllb-200-600M-dzo-eng-checkpoints/checkpoint-6250",
+  "epoch": 5.0,
   "eval_steps": 500,
-  "global_step": 5000,
+  "global_step": 6250,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -114,6 +114,29 @@
       "eval_samples_per_second": 5.951,
       "eval_steps_per_second": 1.488,
       "step": 5000
+    },
+    {
+      "epoch": 4.4,
+      "grad_norm": 0.17317089438438416,
+      "learning_rate": 3.6048e-06,
+      "loss": 0.0337,
+      "step": 5500
+    },
+    {
+      "epoch": 4.8,
+      "grad_norm": 0.32834669947624207,
+      "learning_rate": 1.2048e-06,
+      "loss": 0.033,
+      "step": 6000
+    },
+    {
+      "epoch": 5.0,
+      "eval_bleu": 59.5126771383472,
+      "eval_loss": 0.07744310051202774,
+      "eval_runtime": 168.6113,
+      "eval_samples_per_second": 5.931,
+      "eval_steps_per_second": 1.483,
+      "step": 6250
     }
   ],
   "logging_steps": 500,
@@ -128,12 +151,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.083552301056e+16,
+  "total_flos": 1.35444037632e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null