bhuvanmdev committed
Commit 0bf5b8a · verified · 1 Parent(s): ee7ab1c

Training in progress, step 2300, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4d0e6f6e105ca8708dd58c58cac0d38ef3d7e182e4118c1d7c3bbbb4cfbb8e5
+oid sha256:f78f8e128e75e11b3bd24fe3cafa27eb00c4b149658842b4a45b224ec5f5ee22
 size 100697728
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5fe2060f54f8854e34b115c7210a27ece708af54ff47a9aed67412ee4f2d4639
+oid sha256:b815a874278b1223221c3e50dc80f1ff0a4372ec10c87401ec27f3ea0c36a67a
 size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1817c09855fd07da9d0e441c81f4b0b9ce05c1e1787ca7dcb5d996cd39c530fd
+oid sha256:502b3acd424d63dc379a6747cfdc8fa58c6c30b6487e9ba722dee624541749e4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:36cd02824fbe35ffd9c39b1765aeeef980a727adc03234507c83ce617d1b746f
+oid sha256:fc458452416b9e8ad2bf3686e504ce90d7aec8e9da53788a9e1ab68961ab5745
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.815742397137746,
+  "epoch": 0.8228980322003577,
   "eval_steps": 500,
-  "global_step": 2280,
+  "global_step": 2300,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1831,14 +1831,30 @@
       "loss": 0.3834,
       "num_input_tokens_seen": 1545040,
       "step": 2280
+    },
+    {
+      "epoch": 0.8193202146690519,
+      "grad_norm": 0.5155879855155945,
+      "learning_rate": 3.6135957066189624e-05,
+      "loss": 0.3814,
+      "num_input_tokens_seen": 1552028,
+      "step": 2290
+    },
+    {
+      "epoch": 0.8228980322003577,
+      "grad_norm": 0.2956937551498413,
+      "learning_rate": 3.5420393559928444e-05,
+      "loss": 0.395,
+      "num_input_tokens_seen": 1559137,
+      "step": 2300
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen": 1545040,
+  "num_input_tokens_seen": 1559137,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 3.474249345810432e+16,
+  "total_flos": 3.5059485206071296e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null