Commit 4a63ab4 (verified) by bhuvanmdev
1 Parent(s): 780a96a

Training in progress, step 2480, checkpoint
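
The file layout below (adapter_model.safetensors, optimizer.pt, scheduler.pt, rng_state.pth, trainer_state.json) is what the Hugging Face `transformers` `Trainer` writes on each `save_steps` interval. As a rough orientation only, here is a minimal sketch of a run consistent with the values recorded in `trainer_state.json` in this commit (`save_steps=20`, `logging_steps=10`, `max_steps=2795`, batch size 1); the base model, dataset, and LoRA settings are placeholders, not this repository's actual configuration.

```python
# Hypothetical sketch of a Trainer setup implied by trainer_state.json in this
# checkpoint (save_steps=20, logging_steps=10, max_steps=2795, batch size 1).
# Base model, dataset, and LoRA settings are placeholders/assumptions.
from datasets import Dataset
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

base = "facebook/opt-125m"  # placeholder base model
tokenizer = AutoTokenizer.from_pretrained(base)
model = get_peft_model(
    AutoModelForCausalLM.from_pretrained(base),
    LoraConfig(task_type="CAUSAL_LM"),  # adapter weights -> adapter_model.safetensors
)

# Tiny placeholder dataset; the real training data is not part of this commit.
train_ds = Dataset.from_dict({"text": ["example training text"] * 64}).map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=64),
    batched=True,
    remove_columns=["text"],
)

args = TrainingArguments(
    output_dir="outputs",
    per_device_train_batch_size=1,  # "train_batch_size": 1
    logging_steps=10,               # "logging_steps": 10
    save_steps=20,                  # "save_steps": 20 -> checkpoint every 20 steps
    max_steps=2795,                 # "max_steps": 2795
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()  # writes .../checkpoint-2460, checkpoint-2480, ... as it goes
```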

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bedf1b6d5e32ae60cbf34f929bfa462bde2adec5e00a44530266b9d926cec517
+oid sha256:5d857d487611677e327f686f5bb0b2c412a46dd20a76142f12b663b725d04116
 size 100697728
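
These weight and state files are stored with Git LFS, so each diff above and below only touches the three-line pointer file: `version`, `oid sha256` (the SHA-256 of the file contents), and `size` (the byte count). As a hedged aside, a downloaded copy can be checked against the updated pointer roughly like this (the local path is an assumption):

```python
# Sketch: verify a downloaded file against its Git LFS pointer
# (oid = sha256 of the contents, size = byte length). Path is an assumption.
import hashlib
from pathlib import Path

path = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "5d857d487611677e327f686f5bb0b2c412a46dd20a76142f12b663b725d04116"
expected_size = 100697728

digest = hashlib.sha256()
with path.open("rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):  # stream in 1 MiB chunks
        digest.update(chunk)

assert path.stat().st_size == expected_size, "size mismatch with LFS pointer"
assert digest.hexdigest() == expected_oid, "sha256 mismatch with LFS pointer"
print("adapter_model.safetensors matches the updated LFS pointer")
```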
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a8ab70123d88a30c78dbc1cf668156ef4539a947d64d12664cc583ba9139315b
+oid sha256:ec04c8625fb5e8405492e6ebc4888b578fc58f8848b213588210076bbf8fba0f
 size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab90daea2cc7962c335eac8ebb5f569b419b5cbc004628f6c9c5c4d750b2fa9b
+oid sha256:55465423aa4bddb0a9699c8aac4197f916d1b1aa5ba2c8f60a7f814d309a283f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c215c6736359b4426fb423f696f85c3a29879ea9d026ccf6cd9e1acb18851c2d
+oid sha256:7285ecba5ee57b8ddee91381a24dda5a0f3ce744a4081805f6d4aba444269721
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.8801431127012522,
+  "epoch": 0.8872987477638641,
   "eval_steps": 500,
-  "global_step": 2460,
+  "global_step": 2480,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1975,14 +1975,30 @@
       "loss": 0.3874,
       "num_input_tokens_seen": 1668160,
       "step": 2460
+    },
+    {
+      "epoch": 0.8837209302325582,
+      "grad_norm": 0.30337658524513245,
+      "learning_rate": 2.3255813953488374e-05,
+      "loss": 0.3951,
+      "num_input_tokens_seen": 1675594,
+      "step": 2470
+    },
+    {
+      "epoch": 0.8872987477638641,
+      "grad_norm": 0.2993133068084717,
+      "learning_rate": 2.2540250447227194e-05,
+      "loss": 0.3572,
+      "num_input_tokens_seen": 1682014,
+      "step": 2480
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen": 1668160,
+  "num_input_tokens_seen": 1682014,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 3.751102747312128e+16,
+  "total_flos": 3.782255500921651e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null