neuralwonderland committed
Commit a2c5567 · verified · 1 Parent(s): 02bbd09

Training in progress, step 4650, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:929e2242a9a0088d0fb4cc09d992569fcd02f496312666ac45152ad941d623c0
+oid sha256:be74aa09adbfb9e73506b97419af908a66b89c1b2a4ba8124bfebd46147e88a8
 size 17447528
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b1527a879c6977fe19797b0101cf77d86e24b5440b9a07ee810cb6a1281d301
-size 34959738
+oid sha256:f881a76326a796266f0a52903e1c1d25f422c832a117b36f746de49c5f08b8ca
+size 34959674
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cbf8c3fc08ab547eb72202933b91049b04677fe8f9edb364f6daca74147a4218
+oid sha256:959ec0bf93f91561900c708117dbef11ece1d13f9503fe4346cd174869c09a90
 size 14308
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16bf4acbbdf4243a5b30121f973d426853acaed3d0caa7d9deb065ee076e1e1d
+oid sha256:9612206721dab747b72325faca201c5b62b462fd9a44bcf14483ab477a2645b1
 size 1256
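The four files above are Git LFS pointer updates: only the `oid sha256:` digest (and, for optimizer.pt, the byte size) changes in the repository, while the binary payload lives in LFS storage. A minimal sketch, assuming the objects have already been pulled locally (e.g. via `git lfs pull`), for checking that a downloaded file matches its pointer:

```python
# Sketch: verify a local file against the oid/size recorded in its LFS pointer.
# The path, oid, and size below are taken from the adapter_model.safetensors
# pointer in this commit; adjust for any of the other files.
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's SHA-256 digest and byte size match the pointer."""
    data = Path(path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

ok = verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    expected_oid="be74aa09adbfb9e73506b97419af908a66b89c1b2a4ba8124bfebd46147e88a8",
    expected_size=17447528,
)
print("pointer matches file:", ok)
```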
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
- "best_metric": 1.480732798576355,
- "best_model_checkpoint": "./output/checkpoint-4500",
- "epoch": 0.7087730351236415,
+ "best_metric": 1.4804431200027466,
+ "best_model_checkpoint": "./output/checkpoint-4650",
+ "epoch": 0.7323988029610963,
  "eval_steps": 150,
- "global_step": 4500,
+ "global_step": 4650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3397,6 +3397,119 @@
  "eval_samples_per_second": 13.469,
  "eval_steps_per_second": 13.469,
  "step": 4500
+ },
+ {
+ "epoch": 0.7103480863128052,
+ "grad_norm": 0.8629345893859863,
+ "learning_rate": 3.0589677315529044e-06,
+ "loss": 1.4749,
+ "step": 4510
+ },
+ {
+ "epoch": 0.7119231375019688,
+ "grad_norm": 0.8621689677238464,
+ "learning_rate": 2.9363630392945513e-06,
+ "loss": 1.4772,
+ "step": 4520
+ },
+ {
+ "epoch": 0.7134981886911325,
+ "grad_norm": 0.873957633972168,
+ "learning_rate": 2.816206774856854e-06,
+ "loss": 1.4937,
+ "step": 4530
+ },
+ {
+ "epoch": 0.7150732398802961,
+ "grad_norm": 0.9086722135543823,
+ "learning_rate": 2.6985038773932046e-06,
+ "loss": 1.4512,
+ "step": 4540
+ },
+ {
+ "epoch": 0.7166482910694597,
+ "grad_norm": 0.8475430607795715,
+ "learning_rate": 2.583259185208714e-06,
+ "loss": 1.4621,
+ "step": 4550
+ },
+ {
+ "epoch": 0.7182233422586234,
+ "grad_norm": 0.8581358790397644,
+ "learning_rate": 2.4704774355612943e-06,
+ "loss": 1.4746,
+ "step": 4560
+ },
+ {
+ "epoch": 0.7197983934477871,
+ "grad_norm": 0.8703014254570007,
+ "learning_rate": 2.3601632644669536e-06,
+ "loss": 1.4906,
+ "step": 4570
+ },
+ {
+ "epoch": 0.7213734446369507,
+ "grad_norm": 0.9130226373672485,
+ "learning_rate": 2.2523212065091723e-06,
+ "loss": 1.4825,
+ "step": 4580
+ },
+ {
+ "epoch": 0.7229484958261143,
+ "grad_norm": 0.8566045165061951,
+ "learning_rate": 2.1469556946525706e-06,
+ "loss": 1.4732,
+ "step": 4590
+ },
+ {
+ "epoch": 0.724523547015278,
+ "grad_norm": 0.8569677472114563,
+ "learning_rate": 2.0440710600606595e-06,
+ "loss": 1.4725,
+ "step": 4600
+ },
+ {
+ "epoch": 0.7260985982044417,
+ "grad_norm": 0.9196586012840271,
+ "learning_rate": 1.9436715319177956e-06,
+ "loss": 1.4832,
+ "step": 4610
+ },
+ {
+ "epoch": 0.7276736493936052,
+ "grad_norm": 0.8641564249992371,
+ "learning_rate": 1.8457612372553348e-06,
+ "loss": 1.4994,
+ "step": 4620
+ },
+ {
+ "epoch": 0.7292487005827689,
+ "grad_norm": 0.8218653798103333,
+ "learning_rate": 1.75034420078201e-06,
+ "loss": 1.4748,
+ "step": 4630
+ },
+ {
+ "epoch": 0.7308237517719326,
+ "grad_norm": 0.8673424124717712,
+ "learning_rate": 1.6574243447184597e-06,
+ "loss": 1.4779,
+ "step": 4640
+ },
+ {
+ "epoch": 0.7323988029610963,
+ "grad_norm": 0.8800205588340759,
+ "learning_rate": 1.567005488636024e-06,
+ "loss": 1.4927,
+ "step": 4650
+ },
+ {
+ "epoch": 0.7323988029610963,
+ "eval_loss": 1.4804431200027466,
+ "eval_runtime": 37.6922,
+ "eval_samples_per_second": 13.292,
+ "eval_steps_per_second": 13.292,
+ "step": 4650
  }
  ],
  "logging_steps": 10,
@@ -3416,7 +3529,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4.6814781814677504e+17,
+ "total_flos": 4.836760721871176e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null