CocoRoF committed
Commit a16f3d2 · verified · 1 Parent(s): c9ddde8

Training in progress, step 10000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7f17723ea2dc8d07eec950ce6661a040e4a2bf91a0a1310fde192975a612831b
+ oid sha256:a650c90512a42dd4683e8a973d7b2a127d961133cf781e96a2f0bedbf19c86ff
  size 962707376
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9b2d310cc85410edafa6b19e0fb8e40f731b33476548775abce57515155f9f4f
+ oid sha256:9968735e9e258ea0ca015e23b0b8b0c42c3599f8a7328f83e1fd380c91894e9b
  size 61870586
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cf520f9e4f91c0f6fdff915bd23ec6d457ff663156e78e10369951a31dec58a7
+ oid sha256:70a8bad0a644766abc3efe2e19d0f0855eb6b4e5b56ecabe95af68e9bc0f2d75
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6728d29a0b06760da0481c2ca2a495c6b194e450ebfb9817990e46d2e1aa2a23
+ oid sha256:ab7cf4dee40cabd63b41ea11b2d5474174351dcc28ca3f8824100288377df3d5
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:07669cde176c75c081c4da15e4ae6a7b6eda38633d16631cc31f97d7433b5d62
+ oid sha256:9bcb873a9c3fc793985c939b6c1983b22259cb938f5837ad9557e8686e8fb37e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:38aa191ab0e5668c9cb71e6a9b193a01b9b7f5021ef11b8b75e2b040b7b6c3a0
+ oid sha256:d7d663db8d609f40e7f1b8517b937f6ab700284f01dc91d7fd7acdf1e743e492
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3fdc1907a2f4143bc0e898e77db3a36296150c8c0c843a6c9c3db4c1bcad112
+ oid sha256:ff2eeebb1ff1345658711a7b947405ee1bda725b6384a44aad58e2e369e255d2
  size 1064
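
The checkpoint weights, optimizer, RNG, and scheduler files above are stored as Git LFS pointers, so only the "oid sha256" and "size" lines change in this commit. As a minimal sketch (not part of this repo), the hypothetical helper below shows how a downloaded blob could be checked against its committed pointer; the function name and paths are illustrative assumptions.

```python
# Hypothetical helper: verify that a downloaded checkpoint file matches its
# committed Git LFS pointer by comparing SHA-256 digest and byte size.
import hashlib
import os


def verify_lfs_pointer(pointer_path: str, blob_path: str, chunk_size: int = 1 << 20) -> bool:
    # Parse the pointer file ("version ...", "oid sha256:<hex>", "size <bytes>").
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    # Hash the actual blob in chunks to avoid loading ~1 GB into memory.
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)

    return digest.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size
```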
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.5044249296188354,
- "best_model_checkpoint": "/workspace/plateer_classifier_v0.1_result/checkpoint-5000",
- "epoch": 0.029280390483287486,
+ "best_metric": 0.457188218832016,
+ "best_model_checkpoint": "/workspace/plateer_classifier_v0.1_result/checkpoint-10000",
+ "epoch": 0.05856078096657497,
  "eval_steps": 5000,
- "global_step": 5000,
+ "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -156,6 +156,155 @@
  "eval_samples_per_second": 210.432,
  "eval_steps_per_second": 6.576,
  "step": 5000
+ },
+ {
+ "epoch": 0.03074441000745186,
+ "grad_norm": 24.74563217163086,
+ "learning_rate": 0.00010484,
+ "loss": 0.5073,
+ "step": 5250
+ },
+ {
+ "epoch": 0.032208429531616234,
+ "grad_norm": 17.229019165039062,
+ "learning_rate": 0.00010984,
+ "loss": 0.4932,
+ "step": 5500
+ },
+ {
+ "epoch": 0.03367244905578061,
+ "grad_norm": 23.318979263305664,
+ "learning_rate": 0.00011484000000000002,
+ "loss": 0.504,
+ "step": 5750
+ },
+ {
+ "epoch": 0.035136468579944985,
+ "grad_norm": 22.271846771240234,
+ "learning_rate": 0.00011983999999999999,
+ "loss": 0.4817,
+ "step": 6000
+ },
+ {
+ "epoch": 0.036600488104109354,
+ "grad_norm": 24.304887771606445,
+ "learning_rate": 0.00012484,
+ "loss": 0.4966,
+ "step": 6250
+ },
+ {
+ "epoch": 0.03806450762827373,
+ "grad_norm": 23.76158905029297,
+ "learning_rate": 0.00012984000000000002,
+ "loss": 0.4899,
+ "step": 6500
+ },
+ {
+ "epoch": 0.039528527152438105,
+ "grad_norm": 20.765274047851562,
+ "learning_rate": 0.00013484,
+ "loss": 0.4773,
+ "step": 6750
+ },
+ {
+ "epoch": 0.04099254667660248,
+ "grad_norm": 12.793950080871582,
+ "learning_rate": 0.00013982000000000003,
+ "loss": 0.4781,
+ "step": 7000
+ },
+ {
+ "epoch": 0.042456566200766856,
+ "grad_norm": 14.128210067749023,
+ "learning_rate": 0.00014482,
+ "loss": 0.4687,
+ "step": 7250
+ },
+ {
+ "epoch": 0.043920585724931224,
+ "grad_norm": 22.348928451538086,
+ "learning_rate": 0.00014982,
+ "loss": 0.4722,
+ "step": 7500
+ },
+ {
+ "epoch": 0.0453846052490956,
+ "grad_norm": 17.29800796508789,
+ "learning_rate": 0.00015480000000000002,
+ "loss": 0.4692,
+ "step": 7750
+ },
+ {
+ "epoch": 0.046848624773259975,
+ "grad_norm": 11.0147066116333,
+ "learning_rate": 0.0001598,
+ "loss": 0.4689,
+ "step": 8000
+ },
+ {
+ "epoch": 0.04831264429742435,
+ "grad_norm": 11.713265419006348,
+ "learning_rate": 0.0001648,
+ "loss": 0.4788,
+ "step": 8250
+ },
+ {
+ "epoch": 0.049776663821588726,
+ "grad_norm": 12.367693901062012,
+ "learning_rate": 0.0001698,
+ "loss": 0.4697,
+ "step": 8500
+ },
+ {
+ "epoch": 0.0512406833457531,
+ "grad_norm": 8.11889934539795,
+ "learning_rate": 0.00017480000000000002,
+ "loss": 0.4696,
+ "step": 8750
+ },
+ {
+ "epoch": 0.05270470286991747,
+ "grad_norm": 12.321019172668457,
+ "learning_rate": 0.0001798,
+ "loss": 0.461,
+ "step": 9000
+ },
+ {
+ "epoch": 0.054168722394081846,
+ "grad_norm": 15.612183570861816,
+ "learning_rate": 0.00018480000000000002,
+ "loss": 0.4646,
+ "step": 9250
+ },
+ {
+ "epoch": 0.05563274191824622,
+ "grad_norm": 10.72978687286377,
+ "learning_rate": 0.0001898,
+ "loss": 0.4673,
+ "step": 9500
+ },
+ {
+ "epoch": 0.0570967614424106,
+ "grad_norm": 8.815441131591797,
+ "learning_rate": 0.0001948,
+ "loss": 0.4472,
+ "step": 9750
+ },
+ {
+ "epoch": 0.05856078096657497,
+ "grad_norm": 8.681705474853516,
+ "learning_rate": 0.0001998,
+ "loss": 0.4629,
+ "step": 10000
+ },
+ {
+ "epoch": 0.05856078096657497,
+ "eval_accuracy": 0.8688706572649133,
+ "eval_loss": 0.457188218832016,
+ "eval_runtime": 11537.8227,
+ "eval_samples_per_second": 210.492,
+ "eval_steps_per_second": 6.578,
+ "step": 10000
  }
  ],
  "logging_steps": 250,
@@ -175,7 +324,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.2957223124310426e+18,
+ "total_flos": 2.591444624862085e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null