sandernotenbaert committed
Commit 3582f55 · verified · 1 Parent(s): a6608c6

Training in progress, step 5500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52d626255d3fda2c0de4ca84328c83dbcced0b5ce4b0d0e1c6cf31b50694922b
+ oid sha256:b56fbdf7fe55206aba37bd5ae400349b2464f89eeb757ae10fcf1d6efcc7644f
  size 30214176
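
model.safetensors, like the other binary files in this checkpoint, is stored as a Git LFS pointer, so the diff only touches the oid sha256: line; the size stays at 30214176 bytes because the tensor shapes are unchanged. As a rough illustration (the local path is an assumption, not part of this commit), a downloaded blob can be checked against the pointer's oid:

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256, the value recorded as oid in the LFS pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local path; expected value is the new oid from the pointer above.
expected = "b56fbdf7fe55206aba37bd5ae400349b2464f89eeb757ae10fcf1d6efcc7644f"
actual = lfs_sha256("last-checkpoint/model.safetensors")
print("oid match" if actual == expected else "oid mismatch")
```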
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26d6d4bce7a94c7ee7f993f30f3cb3a1b9aebeeef0cdc917b1c334ecd930d145
+ oid sha256:613a8d1a657ba89ada23ab7d7633f3cf31fb4364c0614b7c8174ec86a031e3af
  size 291962
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:716e3016868ceaf8d4f0dbe38b820cfb087f035aa02a736305ef885eb0b2f9a9
+ oid sha256:78393450795811d9dfafdce73145f39f52c703d761292801d15750c186cc2118
  size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:38bb49bc916eec650d156e5efb1f9040e7833436ca5df10a36a3cd87a7164579
+ oid sha256:27cf200ae168599617b8ef6dbb08dc689bd11010b7655ba355b260440e03cb3c
  size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:82c6cad2f916ae1c4597aa277a2ae6583004fe151268a304e96495ea21dc7bbe
+ oid sha256:fa325f8460c9be76052157bd9b7d0c22035d7e2dbd36acf508d25edf25974d7b
  size 1064
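
The five files above, together with trainer_state.json below, make up one complete Hugging Face Trainer checkpoint: model weights, optimizer and LR-scheduler state, the AMP GradScaler, and the RNG state, which is what lets trainer.train(resume_from_checkpoint=...) continue the run mid-epoch. A minimal inspection sketch, assuming the repo has been cloned locally with Git LFS so the pointers above resolve to real files:

```python
import torch
from safetensors.torch import load_file

ckpt_dir = "last-checkpoint"  # assumed local clone of this repo

# Model weights: the 30214176-byte safetensors blob referenced above.
weights = load_file(f"{ckpt_dir}/model.safetensors")
print(f"{len(weights)} tensors, {sum(t.numel() for t in weights.values()):,} parameters")

# Optimizer, LR-scheduler, and AMP GradScaler state dicts. weights_only=False
# because these pickles hold more than bare tensors (requires torch >= 1.13).
optimizer_state = torch.load(f"{ckpt_dir}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load(f"{ckpt_dir}/scheduler.pt", map_location="cpu", weights_only=False)
scaler_state = torch.load(f"{ckpt_dir}/scaler.pt", map_location="cpu", weights_only=False)
print("scheduler state keys:", sorted(scheduler_state))
```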
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
  "best_global_step": 4500,
  "best_metric": 1.5784235000610352,
  "best_model_checkpoint": "./results/hierarchical_music_t5_small_finetune/checkpoint-4500",
- "epoch": 2.226186169472659,
+ "epoch": 2.4488103520244886,
  "eval_steps": 500,
- "global_step": 5000,
+ "global_step": 5500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -788,6 +788,84 @@
  "eval_samples_per_second": 390.719,
  "eval_steps_per_second": 48.855,
  "step": 5000
+ },
+ {
+ "epoch": 2.2484485877278417,
+ "grad_norm": 1.1154942512512207,
+ "learning_rate": 5e-05,
+ "loss": 1.6964,
+ "step": 5050
+ },
+ {
+ "epoch": 2.270711005983025,
+ "grad_norm": 1.117543339729309,
+ "learning_rate": 5e-05,
+ "loss": 1.6862,
+ "step": 5100
+ },
+ {
+ "epoch": 2.292973424238208,
+ "grad_norm": 0.9821292161941528,
+ "learning_rate": 5e-05,
+ "loss": 1.6819,
+ "step": 5150
+ },
+ {
+ "epoch": 2.315235842493391,
+ "grad_norm": 1.1892586946487427,
+ "learning_rate": 5e-05,
+ "loss": 1.6964,
+ "step": 5200
+ },
+ {
+ "epoch": 2.337498260748574,
+ "grad_norm": 1.3049404621124268,
+ "learning_rate": 5e-05,
+ "loss": 1.682,
+ "step": 5250
+ },
+ {
+ "epoch": 2.359760679003757,
+ "grad_norm": 1.0873595476150513,
+ "learning_rate": 5e-05,
+ "loss": 1.6399,
+ "step": 5300
+ },
+ {
+ "epoch": 2.38202309725894,
+ "grad_norm": 1.0370205640792847,
+ "learning_rate": 5e-05,
+ "loss": 1.6153,
+ "step": 5350
+ },
+ {
+ "epoch": 2.4042855155141227,
+ "grad_norm": 0.8503725528717041,
+ "learning_rate": 5e-05,
+ "loss": 1.6022,
+ "step": 5400
+ },
+ {
+ "epoch": 2.4265479337693057,
+ "grad_norm": 0.9510111212730408,
+ "learning_rate": 5e-05,
+ "loss": 1.6106,
+ "step": 5450
+ },
+ {
+ "epoch": 2.4488103520244886,
+ "grad_norm": 0.9935341477394104,
+ "learning_rate": 5e-05,
+ "loss": 1.6049,
+ "step": 5500
+ },
+ {
+ "epoch": 2.4488103520244886,
+ "eval_loss": 1.586571455001831,
+ "eval_runtime": 40.7387,
+ "eval_samples_per_second": 392.036,
+ "eval_steps_per_second": 49.02,
+ "step": 5500
  }
  ],
  "logging_steps": 50,
@@ -802,7 +880,7 @@
  "early_stopping_threshold": 0.0
  },
  "attributes": {
- "early_stopping_patience_counter": 1
+ "early_stopping_patience_counter": 2
  }
  },
  "TrainerControl": {
@@ -816,7 +894,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.984755905276928e+16,
+ "total_flos": 2.4081224007407616e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null