dimasik2987 committed
Commit 3efda2b · verified · 1 Parent(s): 799e53d

Training in progress, step 12, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61996341056c0dabfab719a24657f6be2c1701f6ab89a6b0227572c4e5cbfd87
+oid sha256:1aee6814908f60d28f3e5fcc332554daab1d81bbd0d9381a2818cf428f91a055
 size 200068512
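
adapter_model.safetensors carries the trained adapter weights; only the tensor contents changed, while the size (200068512 bytes) is identical across revisions. A minimal sketch for loading this checkpoint, assuming it is a PEFT adapter and using a placeholder base-model name (the actual base model is not shown in this diff):

# Sketch only: assumes a PEFT adapter checkpoint; "base-org/base-model" is a
# placeholder, not the repository's actual base model.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-org/base-model")
model = PeftModel.from_pretrained(base, "last-checkpoint")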
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71ff81c602eef2b9aea49d39e2dc2325b032b26045cd7cdfca451de5e4ca8984
+oid sha256:c23cd49c0107bd85eef33672afcfbfae97e1304bb32ad1839126632c6ab5c754
 size 400361770
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b632b832bdc839a1f87f2b54a86c7465bc00e10f4d1d0eef845b6f6921a2856e
+oid sha256:d5fa5a15dacc246c77ef90ba7e7896379953025d6077daf340f31bc19decacb1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26a93b2a1f4b5368650119fe6e0d6eec6d19cda6badeba4d21943ab48964fa00
+oid sha256:68888158764ed5e658b457a541f86335ea31432325308674d2962aa98e037fa4
 size 1064
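
Each of the four files above is stored as a Git LFS pointer: the repository records only a sha256 oid and a byte size, and the payload lives in LFS. A short sketch for checking a locally downloaded file against the oid in its pointer (sha256_of is a helper defined here, not part of the repository):

# Sketch: verify a downloaded checkpoint file against the "+ oid" shown above.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "1aee6814908f60d28f3e5fcc332554daab1d81bbd0d9381a2818cf428f91a055"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)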
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.002153576956726563,
+  "epoch": 0.0032303654350898446,
   "eval_steps": 4,
-  "global_step": 8,
+  "global_step": 12,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -87,6 +87,42 @@
       "eval_samples_per_second": 8.368,
       "eval_steps_per_second": 4.187,
       "step": 8
+    },
+    {
+      "epoch": 0.0024227740763173833,
+      "grad_norm": 6.028713226318359,
+      "learning_rate": 0.00018,
+      "loss": 4.9807,
+      "step": 9
+    },
+    {
+      "epoch": 0.0026919711959082038,
+      "grad_norm": 3.5283517837524414,
+      "learning_rate": 0.0002,
+      "loss": 4.4461,
+      "step": 10
+    },
+    {
+      "epoch": 0.002961168315499024,
+      "grad_norm": 3.948577880859375,
+      "learning_rate": 0.0001996917333733128,
+      "loss": 4.0834,
+      "step": 11
+    },
+    {
+      "epoch": 0.0032303654350898446,
+      "grad_norm": 3.531851053237915,
+      "learning_rate": 0.00019876883405951377,
+      "loss": 4.6573,
+      "step": 12
+    },
+    {
+      "epoch": 0.0032303654350898446,
+      "eval_loss": 4.350089073181152,
+      "eval_runtime": 186.8405,
+      "eval_samples_per_second": 8.376,
+      "eval_steps_per_second": 4.191,
+      "step": 12
     }
   ],
   "logging_steps": 1,
@@ -106,7 +142,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6135154892341248.0,
+  "total_flos": 9202732338511872.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null