adammandic87 committed (verified)
Commit d1c67eb · 1 Parent(s): 808d75b

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fae818c11675d199a9c7ca2c355b4caed3ad33e14ad2f4a9db0ed15ac1186f9f
+oid sha256:ca2ef0261b9297e57eb0e6889cbd70bd9fc32b7ddb742c7a9bd122e427882cb1
 size 80792096
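
The adapter_model.safetensors updated here is, judging only by its filename and ~80 MB size, most likely a PEFT/LoRA adapter rather than full model weights. A minimal sketch of loading it for inference, assuming the checkpoint directory also contains the usual adapter_config.json and that the base model is a causal LM; BASE_MODEL is a placeholder, since the base checkpoint is not named anywhere in this commit:

# Sketch only: attach the LoRA adapter saved in last-checkpoint/ to its base model.
# BASE_MODEL is a placeholder -- the actual base checkpoint is not part of this diff.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_MODEL = "your-base-model-id"  # assumption: replace with the real base model

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)

# PeftModel.from_pretrained reads adapter_config.json plus adapter_model.safetensors
# from the directory and wraps the base model with the trained LoRA weights.
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()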
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc1f76dd5ab649ca72bfe988e5db26e67c4889add94764ff40842ba584301846
+oid sha256:623c8669599ab8606c91a89f68efd3817190db7cebdd8f7009d841dfbc06cf13
 size 41459700
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e1d6e1fa09234a6b9aeabf5e7a61d21b1152d5256cf463aede866e5f4c72a842
+oid sha256:c512fb5e6f5d109f5378fc46a214147599d5eb7b9ff95241caad816b4b88a833
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b80fcc7599efca0c6313d990c467c2eb3001742b23ddaadc22e3499c12cea79
+oid sha256:81007ec48272bbdc4f9622c046f9c026bf8120ed11d1398fd97bb5168a6f3dda
 size 1064
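
All four files above are tracked with Git LFS, so the diff only touches their three-line pointer files (version, oid sha256, size); the binary payloads live in LFS storage. A minimal sketch for checking locally downloaded copies against the new pointers, using only the oid and size values shown in this commit:

# Sketch only: verify downloaded checkpoint files against the sha256 oid and
# size recorded in their Git LFS pointers (values taken from the + lines above).
import hashlib
import os

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = {
    "last-checkpoint/adapter_model.safetensors":
        ("ca2ef0261b9297e57eb0e6889cbd70bd9fc32b7ddb742c7a9bd122e427882cb1", 80792096),
    "last-checkpoint/optimizer.pt":
        ("623c8669599ab8606c91a89f68efd3817190db7cebdd8f7009d841dfbc06cf13", 41459700),
    "last-checkpoint/rng_state.pth":
        ("c512fb5e6f5d109f5378fc46a214147599d5eb7b9ff95241caad816b4b88a833", 14244),
    "last-checkpoint/scheduler.pt":
        ("81007ec48272bbdc4f9622c046f9c026bf8120ed11d1398fd97bb5168a6f3dda", 1064),
}

for path, (oid, size) in expected.items():
    ok = os.path.getsize(path) == size and sha256_of(path) == oid
    print(f"{path}: {'OK' if ok else 'MISMATCH'}")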
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02304147465437788,
+  "epoch": 0.04608294930875576,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 27.76,
       "eval_steps_per_second": 13.88,
       "step": 50
+    },
+    {
+      "epoch": 0.027649769585253458,
+      "grad_norm": 0.9648954272270203,
+      "learning_rate": 0.0002,
+      "loss": 1.2425,
+      "step": 60
+    },
+    {
+      "epoch": 0.03225806451612903,
+      "grad_norm": 1.180851697921753,
+      "learning_rate": 0.0002,
+      "loss": 1.1576,
+      "step": 70
+    },
+    {
+      "epoch": 0.03686635944700461,
+      "grad_norm": 0.9131804704666138,
+      "learning_rate": 0.0002,
+      "loss": 1.1993,
+      "step": 80
+    },
+    {
+      "epoch": 0.041474654377880185,
+      "grad_norm": 0.9733966588973999,
+      "learning_rate": 0.0002,
+      "loss": 1.2544,
+      "step": 90
+    },
+    {
+      "epoch": 0.04608294930875576,
+      "grad_norm": 1.0335547924041748,
+      "learning_rate": 0.0002,
+      "loss": 1.223,
+      "step": 100
+    },
+    {
+      "epoch": 0.04608294930875576,
+      "eval_loss": 1.1788889169692993,
+      "eval_runtime": 32.9063,
+      "eval_samples_per_second": 27.776,
+      "eval_steps_per_second": 13.888,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -77,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.1370700200738816e+16,
+  "total_flos": 2.2828532203782144e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null