alicegoesdown committed on
Commit 295634f · verified · 1 Parent(s): 71db994

Training in progress, step 4800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c45690a70256adfe8dd45d76011859829a38eda506426664d152411d1b72f917
+oid sha256:abda0cc3f363bb1d2e21196f721d2f01aa4382c5ceaa574817afc5e8dc4cdc35
 size 335604696
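
All four checkpoint binaries in this commit are tracked with Git LFS, so the diff only touches the pointer files: the spec version, the sha256 object id of the new blob, and its size in bytes. A minimal sketch, assuming the checkpoint has been downloaded locally (paths are illustrative, not part of the commit), for verifying a file against the oid in its pointer:

```python
# Hedged sketch: verify a downloaded checkpoint file against the sha256 oid
# recorded in its Git LFS pointer. Local paths are assumptions for illustration.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid taken from the new pointer in this diff
expected = "abda0cc3f363bb1d2e21196f721d2f01aa4382c5ceaa574817afc5e8dc4cdc35"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")
```

The same check applies to the optimizer.pt, rng_state.pth and scheduler.pt pointers below.
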
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6e86010a8bd8ac86c0da506532b1a58f0933e38eeaf686e8a9175bad6b82bb54
+oid sha256:a75f97f2400857bb0b642cfa6b90faa1b43e91b5646b7b99bfbf7ac6a8684262
 size 671467026
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e94900abd055492c0da44212d9ffd5c8a1ff716215108e409f60e12f4f5ac395
+oid sha256:1cd3505f81776b63461918f5cbbc7762bdbe9c1b1cf66972ef67fa311e524655
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9612206721dab747b72325faca201c5b62b462fd9a44bcf14483ab477a2645b1
+oid sha256:741e5a74b4b8a7e18ee94356eb03e28c998d3e32cf81a6bc98c8f2bfb8fb01d7
 size 1256
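
Besides the adapter weights, the checkpoint carries the optimizer state, the learning-rate scheduler state and an RNG snapshot, which is what lets a transformers run continue from this directory via `trainer.train(resume_from_checkpoint=...)`. A hedged sketch (paths and the comments on typical contents are assumptions, not taken from this repo) for inspecting those files:

```python
# Hedged sketch: peek at the non-weight checkpoint files updated above.
# Paths assume the checkpoint has been downloaded locally.
import torch

# weights_only=False because these are trusted local files containing
# arbitrary pickled state (e.g. numpy RNG objects).
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(optimizer_state.keys())  # typically 'state' and 'param_groups'
print(scheduler_state)         # LR scheduler state dict (small, hence the 1256-byte pointer size)
print(rng_state.keys())        # typically python/numpy/cpu (and cuda) RNG states
```
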
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 3.310655355453491,
-  "best_model_checkpoint": "./output/checkpoint-4650",
-  "epoch": 28.012048192771083,
+  "best_metric": 3.3106162548065186,
+  "best_model_checkpoint": "./output/checkpoint-4800",
+  "epoch": 28.91566265060241,
   "eval_steps": 150,
-  "global_step": 4650,
+  "global_step": 4800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3510,6 +3510,119 @@
       "eval_samples_per_second": 9.649,
       "eval_steps_per_second": 9.649,
       "step": 4650
+    },
+    {
+      "epoch": 28.072289156626507,
+      "grad_norm": 9.195672035217285,
+      "learning_rate": 1.4790913492997438e-06,
+      "loss": 2.6247,
+      "step": 4660
+    },
+    {
+      "epoch": 28.132530120481928,
+      "grad_norm": 14.86363697052002,
+      "learning_rate": 1.3936855405155408e-06,
+      "loss": 2.8831,
+      "step": 4670
+    },
+    {
+      "epoch": 28.19277108433735,
+      "grad_norm": 11.328372955322266,
+      "learning_rate": 1.3107915729816954e-06,
+      "loss": 2.7178,
+      "step": 4680
+    },
+    {
+      "epoch": 28.253012048192772,
+      "grad_norm": 15.23580551147461,
+      "learning_rate": 1.230412854144547e-06,
+      "loss": 3.1991,
+      "step": 4690
+    },
+    {
+      "epoch": 28.313253012048193,
+      "grad_norm": 169.89739990234375,
+      "learning_rate": 1.15255268805841e-06,
+      "loss": 3.0329,
+      "step": 4700
+    },
+    {
+      "epoch": 28.373493975903614,
+      "grad_norm": 323.2249450683594,
+      "learning_rate": 1.0772142752497604e-06,
+      "loss": 3.1795,
+      "step": 4710
+    },
+    {
+      "epoch": 28.433734939759034,
+      "grad_norm": 24.42875099182129,
+      "learning_rate": 1.004400712585646e-06,
+      "loss": 2.8986,
+      "step": 4720
+    },
+    {
+      "epoch": 28.49397590361446,
+      "grad_norm": 14.24337387084961,
+      "learning_rate": 9.341149931464537e-07,
+      "loss": 2.7472,
+      "step": 4730
+    },
+    {
+      "epoch": 28.55421686746988,
+      "grad_norm": 90.30467224121094,
+      "learning_rate": 8.663600061028162e-07,
+      "loss": 2.9509,
+      "step": 4740
+    },
+    {
+      "epoch": 28.6144578313253,
+      "grad_norm": 32.362003326416016,
+      "learning_rate": 8.011385365968641e-07,
+      "loss": 3.0889,
+      "step": 4750
+    },
+    {
+      "epoch": 28.674698795180724,
+      "grad_norm": 20.82928466796875,
+      "learning_rate": 7.384532656277698e-07,
+      "loss": 3.0301,
+      "step": 4760
+    },
+    {
+      "epoch": 28.734939759036145,
+      "grad_norm": 12.928521156311035,
+      "learning_rate": 6.783067699414891e-07,
+      "loss": 2.8634,
+      "step": 4770
+    },
+    {
+      "epoch": 28.795180722891565,
+      "grad_norm": 14.081971168518066,
+      "learning_rate": 6.207015219248866e-07,
+      "loss": 2.6318,
+      "step": 4780
+    },
+    {
+      "epoch": 28.855421686746986,
+      "grad_norm": 13.051186561584473,
+      "learning_rate": 5.656398895040813e-07,
+      "loss": 2.7618,
+      "step": 4790
+    },
+    {
+      "epoch": 28.91566265060241,
+      "grad_norm": 221.75543212890625,
+      "learning_rate": 5.131241360471217e-07,
+      "loss": 3.0757,
+      "step": 4800
+    },
+    {
+      "epoch": 28.91566265060241,
+      "eval_loss": 3.3106162548065186,
+      "eval_runtime": 3.8397,
+      "eval_samples_per_second": 9.636,
+      "eval_steps_per_second": 9.636,
+      "step": 4800
     }
   ],
   "logging_steps": 10,
@@ -3529,7 +3642,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.5087887023546368e+16,
+  "total_flos": 1.5578958526488576e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null