jeromeramos committed
Commit 45ef4f4 · verified · 1 Parent(s): ee9f722

Model save

README.md CHANGED
@@ -1,9 +1,7 @@
1
  ---
2
  base_model: Sim4Rec/inter-play-sim-assistant-sft
3
- datasets:
4
- - Sim4Rec/dpo_data
5
  library_name: transformers
6
- model_name: Sim4Rec/inter-play-sim-assistant-sft
7
  tags:
8
  - generated_from_trainer
9
  - trl
@@ -11,9 +9,9 @@ tags:
11
  licence: license
12
  ---
13
 
14
- # Model Card for Sim4Rec/inter-play-sim-assistant-sft
15
 
16
- This model is a fine-tuned version of [Sim4Rec/inter-play-sim-assistant-sft](https://huggingface.co/Sim4Rec/inter-play-sim-assistant-sft) on the [['Sim4Rec/dpo_data']](https://huggingface.co/datasets/['Sim4Rec/dpo_data']) dataset.
17
  It has been trained using [TRL](https://github.com/huggingface/trl).
18
 
19
  ## Quick start
@@ -29,7 +27,7 @@ print(output["generated_text"])
29
 
30
  ## Training procedure
31
 
32
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/jerome-ramos-20/huggingface/runs/q3zfrxdb)
33
 
34
 
35
  This model was trained with DPO, a method introduced in [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://huggingface.co/papers/2305.18290).
 
1
  ---
2
  base_model: Sim4Rec/inter-play-sim-assistant-sft
 
 
3
  library_name: transformers
4
+ model_name: inter-play-sim-assistant-dpo
5
  tags:
6
  - generated_from_trainer
7
  - trl
 
9
  licence: license
10
  ---
11
 
12
+ # Model Card for inter-play-sim-assistant-dpo
13
 
14
+ This model is a fine-tuned version of [Sim4Rec/inter-play-sim-assistant-sft](https://huggingface.co/Sim4Rec/inter-play-sim-assistant-sft).
15
  It has been trained using [TRL](https://github.com/huggingface/trl).
16
 
17
  ## Quick start
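The quick-start snippet itself is elided in this diff; only its closing `print(output["generated_text"])` line is visible in a hunk header further up. As a rough sketch of the kind of pipeline call the card's Quick start section describes (the repository id `Sim4Rec/inter-play-sim-assistant-dpo` and the prompt are assumptions, not taken from this commit):

```python
from transformers import pipeline

# Assumed repository id for this DPO checkpoint; adjust to the actual model id.
generator = pipeline(
    "text-generation",
    model="Sim4Rec/inter-play-sim-assistant-dpo",
    device_map="auto",
)

# Chat-style input, as in the TRL model-card template; the prompt is illustrative.
output = generator(
    [{"role": "user", "content": "Recommend a movie for a rainy evening."}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```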
 
27
 
28
  ## Training procedure
29
 
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/jerome-ramos-20/huggingface/runs/k9xn3f7n)
31
 
32
 
33
  This model was trained with DPO, a method introduced in [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://huggingface.co/papers/2305.18290).
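The card says the model was trained with TRL's implementation of DPO on top of the SFT checkpoint, and the earlier version of the front matter pointed at `Sim4Rec/dpo_data` as the preference dataset. A minimal sketch of such a run under those assumptions (hyperparameters are illustrative, and recent TRL versions take the tokenizer via `processing_class`):

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

# Base SFT checkpoint named in the card.
model_id = "Sim4Rec/inter-play-sim-assistant-sft"
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="bfloat16")
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Preference data with prompt/chosen/rejected columns; the dataset id comes from the
# front matter that this commit removes, so treat it as an assumption.
dataset = load_dataset("Sim4Rec/dpo_data", split="train")

training_args = DPOConfig(
    output_dir="inter-play-sim-assistant-dpo",
    num_train_epochs=1,           # matches the epoch recorded in this commit's trainer_state.json
    learning_rate=5e-7,           # illustrative; the logged schedule peaks near 5e-7
    gradient_checkpointing=True,  # consistent with use_cache being saved as false in config.json
    bf16=True,
)

trainer = DPOTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    processing_class=tokenizer,
)
trainer.train()
```

DPOTrainer logs the `rewards/*` and `logps/*` series that appear in the trainer_state.json diff further down.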
all_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 2.0,
3
  "total_flos": 0.0,
4
- "train_loss": 0.22515048312672067,
5
- "train_runtime": 2958.2669,
6
  "train_samples": 45561,
7
- "train_samples_per_second": 30.802,
8
- "train_steps_per_second": 0.241
9
  }
 
1
  {
2
+ "epoch": 1.0,
3
  "total_flos": 0.0,
4
+ "train_loss": 0.23233672156092827,
5
+ "train_runtime": 2773.067,
6
  "train_samples": 45561,
7
+ "train_samples_per_second": 16.43,
8
+ "train_steps_per_second": 0.257
9
  }
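The updated numbers are internally consistent, and a quick sanity check recovers the effective batch size from them; a small script, assuming only that `all_results.json` sits in the working directory:

```python
import json

with open("all_results.json") as f:
    r = json.load(f)

# 2773.067 s * 16.43 samples/s ≈ 45,561 samples, i.e. exactly one pass over train_samples.
samples_seen = r["train_runtime"] * r["train_samples_per_second"]

# 2773.067 s * 0.257 steps/s ≈ 712 optimizer steps, matching global_step in trainer_state.json.
steps = r["train_runtime"] * r["train_steps_per_second"]

# ~45,561 samples over ~712 steps gives an effective batch of roughly 64 sequences per step.
print(f"samples ≈ {samples_seen:.0f}, steps ≈ {steps:.0f}, effective batch ≈ {samples_seen / steps:.0f}")
```

By the same arithmetic, the previous values (30.802 samples/s over 2958 s) correspond to two epochs at roughly twice the effective batch, which matches the epoch field dropping from 2.0 to 1.0.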
config.json CHANGED
@@ -32,6 +32,6 @@
32
  "tie_word_embeddings": false,
33
  "torch_dtype": "bfloat16",
34
  "transformers_version": "4.48.2",
35
- "use_cache": true,
36
  "vocab_size": 128320
37
  }
 
32
  "tie_word_embeddings": false,
33
  "torch_dtype": "bfloat16",
34
  "transformers_version": "4.48.2",
35
+ "use_cache": false,
36
  "vocab_size": 128320
37
  }
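The only functional change to config.json is `use_cache` flipping to `false`, which is typically what Trainer/TRL persist when gradient checkpointing is enabled during training (the two are incompatible, so the KV cache gets switched off). For inference it is usually worth overriding this back; a one-line sketch, with the repository id assumed:

```python
from transformers import AutoModelForCausalLM

# Assumed repo id; use_cache=True overrides the saved config so generation uses the KV cache again.
model = AutoModelForCausalLM.from_pretrained("Sim4Rec/inter-play-sim-assistant-dpo", use_cache=True)
```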
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:228fda64e183c9fdc3a7708fb3208210cdadad23990e8923b3fe182ecf5c0cec
3
  size 4977222960
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1dca7caf67b0af32b1aa5c9c52ad02de5b54e84a8ec038bd74e3bc91b18d8c7
3
  size 4977222960
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5f7204be8a0fc4f037c035587cfd68a41793f7158bf6cf5794a16e4fe64cbc8a
3
  size 4999802720
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f09aa5fdc5b3591ad4b03edba26dce3703417eba2a78ff25e3f469b3664a254
3
  size 4999802720
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:48b841d9240b04b4dfe85f97b3b566448cfbabe276eac748a7ad65411df8577f
3
  size 4915916176
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da6625e7a5cfcdd77627f26512cf498048fc24b6547e71e3ef4c3a71e41141d9
3
  size 4915916176
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2c785dc0e0713c891a93b159479d9de5baf67a041e01652bf3e46d23a5251c8c
3
  size 1168663096
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6115a28a38e67e9c28cd5db2742d2794509bfac5395d3a4d4f58a995d926ed67
3
  size 1168663096
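Each weight-shard entry above is a Git LFS pointer (a version line, a sha256 oid, and a byte size) rather than the weights themselves. After downloading a shard you can check it against the pointer recorded in this commit; a short sketch using the new pointer for the first shard:

```python
import hashlib
import os

# Values copied from the new LFS pointer for model-00001-of-00004.safetensors in this commit.
PATH = "model-00001-of-00004.safetensors"
EXPECTED_OID = "b1dca7caf67b0af32b1aa5c9c52ad02de5b54e84a8ec038bd74e3bc91b18d8c7"
EXPECTED_SIZE = 4977222960

sha = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha.hexdigest() == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("shard matches its LFS pointer")
```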
runs/Feb17_14-09-57_w-jerom-inter-play-sim-94c6890b9ccf44ea86f033a3db8a5dbd-6d4ql9c/events.out.tfevents.1739801633.w-jerom-inter-play-sim-94c6890b9ccf44ea86f033a3db8a5dbd-6d4ql9c.17212.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e9be616f76489d27fc902d791a648442cd26dc2c39ccc9b948efb7c34c0330f
3
+ size 56172
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3919c1e7bfa558ff525a618a3d463929a238acaba668d7ef6da432fcd6cd7fad
3
- size 17211327
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5ea5afcc70a5f73f9b545a5940b211fd23e2acd4d895a3ebc3144ca348a4633
3
+ size 17211228
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 2.0,
3
  "total_flos": 0.0,
4
- "train_loss": 0.22515048312672067,
5
- "train_runtime": 2958.2669,
6
  "train_samples": 45561,
7
- "train_samples_per_second": 30.802,
8
- "train_steps_per_second": 0.241
9
  }
 
1
  {
2
+ "epoch": 1.0,
3
  "total_flos": 0.0,
4
+ "train_loss": 0.23233672156092827,
5
+ "train_runtime": 2773.067,
6
  "train_samples": 45561,
7
+ "train_samples_per_second": 16.43,
8
+ "train_steps_per_second": 0.257
9
  }
trainer_state.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 2.0,
5
  "eval_steps": 500,
6
  "global_step": 712,
7
  "is_hyper_param_search": false,
@@ -9,13 +9,13 @@
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0028089887640449437,
13
- "grad_norm": 1.1993484497070312,
14
  "learning_rate": 6.9444444444444435e-09,
15
- "logits/chosen": -3.220703125,
16
- "logits/rejected": -3.1796875,
17
- "logps/chosen": -43.75,
18
- "logps/rejected": -42.78125,
19
  "loss": 0.6914,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -24,1084 +24,1084 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.028089887640449437,
28
- "grad_norm": 0.7986037135124207,
29
  "learning_rate": 6.944444444444444e-08,
30
- "logits/chosen": -3.2486979961395264,
31
- "logits/rejected": -3.2052950859069824,
32
- "logps/chosen": -42.34375,
33
- "logps/rejected": -41.01215362548828,
34
- "loss": 0.6921,
35
- "rewards/accuracies": 0.2161458283662796,
36
- "rewards/chosen": 0.00020620558643713593,
37
- "rewards/margins": -0.00015253490710165352,
38
- "rewards/rejected": 0.0003587139945011586,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.056179775280898875,
43
- "grad_norm": 0.6845986247062683,
44
  "learning_rate": 1.3888888888888888e-07,
45
- "logits/chosen": -3.244140625,
46
- "logits/rejected": -3.219531297683716,
47
- "logps/chosen": -41.37812423706055,
48
- "logps/rejected": -40.42656326293945,
49
- "loss": 0.6907,
50
- "rewards/accuracies": 0.3460937440395355,
51
- "rewards/chosen": 0.006803750991821289,
52
- "rewards/margins": 0.003222918603569269,
53
- "rewards/rejected": 0.003580451011657715,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.08426966292134831,
58
- "grad_norm": 0.5183084607124329,
59
  "learning_rate": 2.0833333333333333e-07,
60
- "logits/chosen": -3.263671875,
61
- "logits/rejected": -3.2261719703674316,
62
- "logps/chosen": -41.142189025878906,
63
- "logps/rejected": -40.00312423706055,
64
- "loss": 0.6862,
65
- "rewards/accuracies": 0.44140625,
66
- "rewards/chosen": 0.02759246900677681,
67
- "rewards/margins": 0.013646435923874378,
68
- "rewards/rejected": 0.013950538821518421,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.11235955056179775,
73
- "grad_norm": 0.5848017334938049,
74
  "learning_rate": 2.7777777777777776e-07,
75
- "logits/chosen": -3.286328077316284,
76
- "logits/rejected": -3.229296922683716,
77
- "logps/chosen": -41.439064025878906,
78
- "logps/rejected": -41.0,
79
- "loss": 0.6745,
80
- "rewards/accuracies": 0.5546875,
81
- "rewards/chosen": 0.039928339421749115,
82
- "rewards/margins": 0.038716744631528854,
83
- "rewards/rejected": 0.001224255538545549,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.1404494382022472,
88
- "grad_norm": 0.7922030091285706,
89
  "learning_rate": 3.472222222222222e-07,
90
- "logits/chosen": -3.2728514671325684,
91
- "logits/rejected": NaN,
92
- "logps/chosen": -41.71406173706055,
93
- "logps/rejected": -41.890625,
94
- "loss": 0.6535,
95
- "rewards/accuracies": 0.645312488079071,
96
- "rewards/chosen": 0.03761863708496094,
97
- "rewards/margins": 0.08368835598230362,
98
- "rewards/rejected": -0.04610452800989151,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.16853932584269662,
103
- "grad_norm": 1.3905400037765503,
104
  "learning_rate": 4.1666666666666667e-07,
105
- "logits/chosen": -3.204296827316284,
106
- "logits/rejected": -3.154296875,
107
- "logps/chosen": -44.493751525878906,
108
- "logps/rejected": -48.446876525878906,
109
- "loss": 0.5839,
110
- "rewards/accuracies": 0.692187488079071,
111
- "rewards/chosen": -0.11696071922779083,
112
- "rewards/margins": 0.25516968965530396,
113
- "rewards/rejected": -0.3720703125,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.19662921348314608,
118
- "grad_norm": 0.8105548024177551,
119
  "learning_rate": 4.861111111111111e-07,
120
- "logits/chosen": -3.23828125,
121
- "logits/rejected": -3.130859375,
122
- "logps/chosen": -47.71875,
123
- "logps/rejected": -59.140625,
124
- "loss": 0.4754,
125
- "rewards/accuracies": 0.7093750238418579,
126
- "rewards/chosen": -0.2639709413051605,
127
- "rewards/margins": 0.6321045160293579,
128
- "rewards/rejected": -0.895751953125,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.2247191011235955,
133
- "grad_norm": 0.6645036339759827,
134
  "learning_rate": 4.998072590601808e-07,
135
- "logits/chosen": -3.2529296875,
136
- "logits/rejected": NaN,
137
- "logps/chosen": -43.765625,
138
- "logps/rejected": -72.40937805175781,
139
- "loss": 0.3394,
140
- "rewards/accuracies": 0.73828125,
141
- "rewards/chosen": -0.08822021633386612,
142
- "rewards/margins": 1.5029785633087158,
143
- "rewards/rejected": -1.591284155845642,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.25280898876404495,
148
- "grad_norm": 0.436211496591568,
149
  "learning_rate": 4.990247583129217e-07,
150
- "logits/chosen": -3.268749952316284,
151
- "logits/rejected": -2.9375,
152
- "logps/chosen": -46.046875,
153
- "logps/rejected": -89.46875,
154
- "loss": 0.2745,
155
- "rewards/accuracies": 0.7476562261581421,
156
- "rewards/chosen": -0.22807636857032776,
157
- "rewards/margins": 2.234375,
158
- "rewards/rejected": -2.463183641433716,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.2808988764044944,
163
- "grad_norm": 0.43823757767677307,
164
  "learning_rate": 4.976423351108942e-07,
165
- "logits/chosen": -3.2183594703674316,
166
  "logits/rejected": NaN,
167
- "logps/chosen": -49.96875,
168
- "logps/rejected": -105.32499694824219,
169
- "loss": 0.2356,
170
- "rewards/accuracies": 0.75390625,
171
- "rewards/chosen": -0.3900146484375,
172
- "rewards/margins": 2.8427734375,
173
- "rewards/rejected": -3.232421875,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.3089887640449438,
178
- "grad_norm": 0.6719679832458496,
179
  "learning_rate": 4.95663319832678e-07,
180
- "logits/chosen": -3.2197265625,
181
- "logits/rejected": -2.762890577316284,
182
- "logps/chosen": -46.25312423706055,
183
- "logps/rejected": -114.41874694824219,
184
- "loss": 0.236,
185
- "rewards/accuracies": 0.7523437738418579,
186
- "rewards/chosen": -0.22943687438964844,
187
- "rewards/margins": 3.4632811546325684,
188
- "rewards/rejected": -3.6929688453674316,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.33707865168539325,
193
- "grad_norm": 0.3029685914516449,
194
  "learning_rate": 4.930924800994191e-07,
195
- "logits/chosen": -3.2417969703674316,
196
- "logits/rejected": -2.752734422683716,
197
- "logps/chosen": -45.203125,
198
- "logps/rejected": -123.83125305175781,
199
- "loss": 0.1955,
200
- "rewards/accuracies": 0.77734375,
201
- "rewards/chosen": -0.130183607339859,
202
- "rewards/margins": 4.001757621765137,
203
- "rewards/rejected": -4.1328125,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.3651685393258427,
208
- "grad_norm": 0.5936453938484192,
209
  "learning_rate": 4.899360092892144e-07,
210
- "logits/chosen": -3.2191405296325684,
211
- "logits/rejected": -2.7181639671325684,
212
- "logps/chosen": -46.69843673706055,
213
- "logps/rejected": -128.28750610351562,
214
- "loss": 0.2039,
215
- "rewards/accuracies": 0.7679687738418579,
216
- "rewards/chosen": -0.2604345381259918,
217
- "rewards/margins": 4.139843940734863,
218
- "rewards/rejected": -4.399609565734863,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.39325842696629215,
223
- "grad_norm": 0.4349266588687897,
224
  "learning_rate": 4.862015116167195e-07,
225
- "logits/chosen": -3.243945360183716,
226
- "logits/rejected": NaN,
227
- "logps/chosen": -45.373435974121094,
228
- "logps/rejected": -133.71249389648438,
229
- "loss": 0.1869,
230
- "rewards/accuracies": 0.7789062261581421,
231
- "rewards/chosen": -0.10051727294921875,
232
- "rewards/margins": 4.529296875,
233
- "rewards/rejected": -4.631054878234863,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.42134831460674155,
238
- "grad_norm": 0.32329249382019043,
239
  "learning_rate": 4.81897983813931e-07,
240
- "logits/chosen": -3.2386717796325684,
241
- "logits/rejected": -2.702929735183716,
242
- "logps/chosen": -46.21562576293945,
243
- "logps/rejected": -138.9375,
244
- "loss": 0.1833,
245
- "rewards/accuracies": 0.780468761920929,
246
- "rewards/chosen": -0.20498999953269958,
247
- "rewards/margins": 4.690625190734863,
248
- "rewards/rejected": -4.895898342132568,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.449438202247191,
253
- "grad_norm": 0.24011513590812683,
254
  "learning_rate": 4.770357934562704e-07,
255
- "logits/chosen": -3.281445264816284,
256
- "logits/rejected": NaN,
257
- "logps/chosen": -41.82500076293945,
258
- "logps/rejected": -132.2624969482422,
259
- "loss": 0.195,
260
- "rewards/accuracies": 0.765625,
261
- "rewards/chosen": 0.00346546177752316,
262
- "rewards/margins": 4.6083984375,
263
- "rewards/rejected": -4.605273246765137,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.47752808988764045,
268
- "grad_norm": 0.5602438449859619,
269
  "learning_rate": 4.716266539861866e-07,
270
- "logits/chosen": -3.237499952316284,
271
- "logits/rejected": -2.6552734375,
272
- "logps/chosen": -47.9921875,
273
- "logps/rejected": -144.77499389648438,
274
- "loss": 0.1834,
275
- "rewards/accuracies": 0.7757812738418579,
276
- "rewards/chosen": -0.31496095657348633,
277
- "rewards/margins": 4.906640529632568,
278
- "rewards/rejected": -5.219336032867432,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.5056179775280899,
283
- "grad_norm": 0.5013967156410217,
284
  "learning_rate": 4.6568359649444796e-07,
285
- "logits/chosen": -3.225781202316284,
286
- "logits/rejected": -2.6431641578674316,
287
- "logps/chosen": -47.4140625,
288
- "logps/rejected": -147.6062469482422,
289
- "loss": 0.1777,
290
- "rewards/accuracies": 0.7835937738418579,
291
- "rewards/chosen": -0.26249465346336365,
292
- "rewards/margins": 5.070898532867432,
293
- "rewards/rejected": -5.333398342132568,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.5337078651685393,
298
- "grad_norm": 0.3109186887741089,
299
  "learning_rate": 4.592209383271023e-07,
300
- "logits/chosen": -3.2769532203674316,
301
- "logits/rejected": -2.708203077316284,
302
- "logps/chosen": -40.32500076293945,
303
- "logps/rejected": -146.55624389648438,
304
- "loss": 0.1691,
305
- "rewards/accuracies": 0.7945312261581421,
306
- "rewards/chosen": 0.10864410549402237,
307
- "rewards/margins": 5.372851371765137,
308
- "rewards/rejected": -5.262499809265137,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.5617977528089888,
313
- "grad_norm": 0.329673707485199,
314
  "learning_rate": 4.5225424859373684e-07,
315
- "logits/chosen": -3.244921922683716,
316
- "logits/rejected": -2.625195264816284,
317
- "logps/chosen": -46.1875,
318
- "logps/rejected": -152.55624389648438,
319
- "loss": 0.1722,
320
- "rewards/accuracies": 0.784375011920929,
321
- "rewards/chosen": -0.20301513373851776,
322
- "rewards/margins": 5.3759765625,
323
- "rewards/rejected": -5.578711032867432,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.5898876404494382,
328
- "grad_norm": 0.6182069182395935,
329
  "learning_rate": 4.448003106601291e-07,
330
- "logits/chosen": -3.2255859375,
331
- "logits/rejected": NaN,
332
- "logps/chosen": -45.904685974121094,
333
- "logps/rejected": -153.14999389648438,
334
- "loss": 0.1818,
335
- "rewards/accuracies": 0.7789062261581421,
336
- "rewards/chosen": -0.2197813093662262,
337
- "rewards/margins": 5.441601753234863,
338
- "rewards/rejected": -5.661913871765137,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.6179775280898876,
343
- "grad_norm": 0.10240374505519867,
344
  "learning_rate": 4.3687708171564917e-07,
345
- "logits/chosen": -3.2201170921325684,
346
- "logits/rejected": NaN,
347
- "logps/chosen": -46.890625,
348
- "logps/rejected": -154.24374389648438,
349
- "loss": 0.18,
350
- "rewards/accuracies": 0.76953125,
351
- "rewards/chosen": -0.2859039306640625,
352
- "rewards/margins": 5.441992282867432,
353
- "rewards/rejected": -5.730078220367432,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.6460674157303371,
358
- "grad_norm": 0.5484092235565186,
359
  "learning_rate": 4.2850364951281705e-07,
360
- "logits/chosen": -3.2359375953674316,
361
- "logits/rejected": -2.595507860183716,
362
- "logps/chosen": -44.52656173706055,
363
- "logps/rejected": -156.5187530517578,
364
- "loss": 0.1763,
365
- "rewards/accuracies": 0.785937488079071,
366
- "rewards/chosen": -0.10360870510339737,
367
- "rewards/margins": 5.668359279632568,
368
- "rewards/rejected": -5.7724609375,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.6741573033707865,
373
- "grad_norm": 0.33201783895492554,
374
  "learning_rate": 4.1970018638323547e-07,
375
- "logits/chosen": -3.2476563453674316,
376
- "logits/rejected": -2.6099610328674316,
377
- "logps/chosen": -41.240623474121094,
378
- "logps/rejected": -152.3249969482422,
379
- "loss": 0.1836,
380
- "rewards/accuracies": 0.778124988079071,
381
- "rewards/chosen": 0.05380706861615181,
382
- "rewards/margins": 5.612500190734863,
383
- "rewards/rejected": -5.559960842132568,
384
  "step": 240
385
  },
386
  {
387
- "epoch": 0.702247191011236,
388
- "grad_norm": 0.3225279450416565,
389
  "learning_rate": 4.1048790064067573e-07,
390
- "logits/chosen": -3.2152342796325684,
391
- "logits/rejected": NaN,
392
- "logps/chosen": -42.44062423706055,
393
- "logps/rejected": -152.5437469482422,
394
- "loss": 0.1874,
395
- "rewards/accuracies": 0.7679687738418579,
396
- "rewards/chosen": -0.019408416002988815,
397
- "rewards/margins": 5.590234279632568,
398
- "rewards/rejected": -5.608788967132568,
399
  "step": 250
400
  },
401
  {
402
- "epoch": 0.7303370786516854,
403
- "grad_norm": 0.5013627409934998,
404
  "learning_rate": 4.0088898548839285e-07,
405
- "logits/chosen": -3.1996092796325684,
406
- "logits/rejected": -2.558398485183716,
407
- "logps/chosen": -46.775001525878906,
408
- "logps/rejected": -157.64999389648438,
409
- "loss": 0.1809,
410
- "rewards/accuracies": 0.78125,
411
- "rewards/chosen": -0.24601340293884277,
412
- "rewards/margins": 5.5927734375,
413
- "rewards/rejected": -5.837695121765137,
414
  "step": 260
415
  },
416
  {
417
- "epoch": 0.7584269662921348,
418
- "grad_norm": 0.2542474567890167,
419
  "learning_rate": 3.9092656555375414e-07,
420
- "logits/chosen": -3.184765577316284,
421
- "logits/rejected": -2.549999952316284,
422
- "logps/chosen": -48.16718673706055,
423
- "logps/rejected": -155.3937530517578,
424
- "loss": 0.1927,
425
- "rewards/accuracies": 0.7593749761581421,
426
- "rewards/chosen": -0.3481277525424957,
427
- "rewards/margins": 5.426171779632568,
428
- "rewards/rejected": -5.775000095367432,
429
  "step": 270
430
  },
431
  {
432
- "epoch": 0.7865168539325843,
433
- "grad_norm": 1.486909031867981,
434
  "learning_rate": 3.806246411789872e-07,
435
- "logits/chosen": -3.206835985183716,
436
- "logits/rejected": -2.568554639816284,
437
- "logps/chosen": -46.279685974121094,
438
- "logps/rejected": -161.4499969482422,
439
- "loss": 0.1695,
440
- "rewards/accuracies": 0.796875,
441
- "rewards/chosen": -0.17198029160499573,
442
- "rewards/margins": 5.812304496765137,
443
- "rewards/rejected": -5.983984470367432,
444
  "step": 280
445
  },
446
  {
447
- "epoch": 0.8146067415730337,
448
- "grad_norm": 0.24136996269226074,
449
  "learning_rate": 3.700080306022528e-07,
450
- "logits/chosen": -3.238085985183716,
451
- "logits/rejected": -2.606250047683716,
452
- "logps/chosen": -42.40312576293945,
453
- "logps/rejected": -155.80624389648438,
454
- "loss": 0.1873,
455
- "rewards/accuracies": 0.76953125,
456
- "rewards/chosen": -0.016921233385801315,
457
- "rewards/margins": 5.745703220367432,
458
- "rewards/rejected": -5.7607421875,
459
  "step": 290
460
  },
461
  {
462
- "epoch": 0.8426966292134831,
463
- "grad_norm": 0.3395313620567322,
464
  "learning_rate": 3.5910231016833546e-07,
465
- "logits/chosen": -3.2119140625,
466
- "logits/rejected": NaN,
467
- "logps/chosen": -45.810935974121094,
468
- "logps/rejected": -161.1687469482422,
469
- "loss": 0.174,
470
- "rewards/accuracies": 0.78125,
471
- "rewards/chosen": -0.18533477187156677,
472
- "rewards/margins": 5.834374904632568,
473
- "rewards/rejected": -6.019140720367432,
474
  "step": 300
475
  },
476
  {
477
- "epoch": 0.8707865168539326,
478
- "grad_norm": 0.14939536154270172,
479
  "learning_rate": 3.4793375271298895e-07,
480
- "logits/chosen": -3.1988282203674316,
481
- "logits/rejected": -2.597851514816284,
482
- "logps/chosen": -44.76874923706055,
483
- "logps/rejected": -163.0437469482422,
484
- "loss": 0.1656,
485
- "rewards/accuracies": 0.7867187261581421,
486
- "rewards/chosen": -0.11381302028894424,
487
- "rewards/margins": 5.981835842132568,
488
- "rewards/rejected": -6.096875190734863,
489
  "step": 310
490
  },
491
  {
492
- "epoch": 0.898876404494382,
493
- "grad_norm": 0.6819726228713989,
494
  "learning_rate": 3.3652926426937325e-07,
495
- "logits/chosen": -3.246875047683716,
496
- "logits/rejected": -2.609179735183716,
497
- "logps/chosen": -41.875,
498
- "logps/rejected": -162.28125,
499
- "loss": 0.1824,
500
- "rewards/accuracies": 0.7828124761581421,
501
- "rewards/chosen": 0.06156463548541069,
502
- "rewards/margins": 6.103515625,
503
- "rewards/rejected": -6.043359279632568,
504
  "step": 320
505
  },
506
  {
507
- "epoch": 0.9269662921348315,
508
- "grad_norm": 0.10508494079113007,
509
  "learning_rate": 3.249163192490642e-07,
510
- "logits/chosen": -3.2544922828674316,
511
- "logits/rejected": -2.624804735183716,
512
- "logps/chosen": -41.25,
513
- "logps/rejected": -159.97500610351562,
514
- "loss": 0.1816,
515
- "rewards/accuracies": 0.7734375,
516
- "rewards/chosen": 0.021668624132871628,
517
- "rewards/margins": 5.989648342132568,
518
- "rewards/rejected": -5.967968940734863,
519
  "step": 330
520
  },
521
  {
522
- "epoch": 0.9550561797752809,
523
- "grad_norm": 0.2005680650472641,
524
  "learning_rate": 3.1312289425378944e-07,
525
- "logits/chosen": -3.2759766578674316,
526
- "logits/rejected": -2.6624999046325684,
527
- "logps/chosen": -39.45781326293945,
528
- "logps/rejected": -157.4375,
529
- "loss": 0.1757,
530
- "rewards/accuracies": 0.78125,
531
- "rewards/chosen": 0.14164963364601135,
532
- "rewards/margins": 5.972851753234863,
533
- "rewards/rejected": -5.831250190734863,
534
  "step": 340
535
  },
536
  {
537
- "epoch": 0.9831460674157303,
538
- "grad_norm": 0.2797287404537201,
539
  "learning_rate": 3.011774006773449e-07,
540
- "logits/chosen": -3.282031297683716,
541
- "logits/rejected": NaN,
542
- "logps/chosen": -37.98906326293945,
543
- "logps/rejected": -157.4375,
544
- "loss": 0.1713,
545
- "rewards/accuracies": 0.7757812738418579,
546
- "rewards/chosen": 0.19436034560203552,
547
- "rewards/margins": 6.061718940734863,
548
- "rewards/rejected": -5.866406440734863,
549
  "step": 350
550
  },
551
  {
552
- "epoch": 1.0112359550561798,
553
- "grad_norm": 0.1386144906282425,
554
  "learning_rate": 2.8910861626005773e-07,
555
- "logits/chosen": -3.2330079078674316,
556
- "logits/rejected": -2.6128907203674316,
557
- "logps/chosen": -42.69062423706055,
558
- "logps/rejected": -162.75625610351562,
559
- "loss": 0.1689,
560
- "rewards/accuracies": 0.7942708730697632,
561
- "rewards/chosen": -0.019468307495117188,
562
- "rewards/margins": 6.076367378234863,
563
- "rewards/rejected": -6.09765625,
564
  "step": 360
565
  },
566
  {
567
- "epoch": 1.0393258426966292,
568
- "grad_norm": 0.31265443563461304,
569
  "learning_rate": 2.7694561576068983e-07,
570
- "logits/chosen": -3.234375,
571
- "logits/rejected": -2.5785155296325684,
572
- "logps/chosen": -41.896873474121094,
573
- "logps/rejected": -160.8125,
574
- "loss": 0.1713,
575
- "rewards/accuracies": 0.7867187261581421,
576
- "rewards/chosen": -0.01103897113353014,
577
- "rewards/margins": 6.00390625,
578
- "rewards/rejected": -6.014843940734863,
579
  "step": 370
580
  },
581
  {
582
- "epoch": 1.0674157303370786,
583
- "grad_norm": 0.3972986042499542,
584
  "learning_rate": 2.647177009127972e-07,
585
- "logits/chosen": -3.2544922828674316,
586
- "logits/rejected": -2.5933594703674316,
587
- "logps/chosen": -39.5703125,
588
- "logps/rejected": -160.85000610351562,
589
- "loss": 0.1793,
590
- "rewards/accuracies": 0.768750011920929,
591
- "rewards/chosen": 0.09815521538257599,
592
- "rewards/margins": 6.125781059265137,
593
- "rewards/rejected": -6.029492378234863,
594
  "step": 380
595
  },
596
  {
597
- "epoch": 1.095505617977528,
598
- "grad_norm": 0.12721529603004456,
599
  "learning_rate": 2.524543298342874e-07,
600
- "logits/chosen": -3.262890577316284,
601
- "logits/rejected": -2.592578172683716,
602
- "logps/chosen": -40.4765625,
603
- "logps/rejected": -161.91250610351562,
604
- "loss": 0.1743,
605
- "rewards/accuracies": 0.77734375,
606
- "rewards/chosen": 0.05222644656896591,
607
- "rewards/margins": 6.142578125,
608
- "rewards/rejected": -6.091406345367432,
609
  "step": 390
610
  },
611
  {
612
- "epoch": 1.1235955056179776,
613
- "grad_norm": 0.09501045197248459,
614
  "learning_rate": 2.401850460602329e-07,
615
- "logits/chosen": -3.2220702171325684,
616
- "logits/rejected": -2.559765577316284,
617
- "logps/chosen": -44.423439025878906,
618
- "logps/rejected": -167.08749389648438,
619
- "loss": 0.1718,
620
- "rewards/accuracies": 0.780468761920929,
621
- "rewards/chosen": -0.09567908942699432,
622
- "rewards/margins": 6.20703125,
623
- "rewards/rejected": -6.302734375,
624
  "step": 400
625
  },
626
  {
627
- "epoch": 1.151685393258427,
628
- "grad_norm": 0.29580923914909363,
629
  "learning_rate": 2.2793940736990766e-07,
630
- "logits/chosen": -3.2007813453674316,
631
- "logits/rejected": NaN,
632
- "logps/chosen": -51.12968826293945,
633
- "logps/rejected": -170.96875,
634
- "loss": 0.172,
635
- "rewards/accuracies": 0.780468761920929,
636
- "rewards/chosen": -0.42823487520217896,
637
- "rewards/margins": 6.079297065734863,
638
- "rewards/rejected": -6.508593559265137,
639
  "step": 410
640
  },
641
  {
642
- "epoch": 1.1797752808988764,
643
- "grad_norm": 0.2765229046344757,
644
  "learning_rate": 2.1574691457950803e-07,
645
- "logits/chosen": -3.1839842796325684,
646
  "logits/rejected": NaN,
647
- "logps/chosen": -50.9609375,
648
- "logps/rejected": -170.78125,
649
- "loss": 0.1704,
650
- "rewards/accuracies": 0.7734375,
651
- "rewards/chosen": -0.46327972412109375,
652
- "rewards/margins": 6.071484565734863,
653
- "rewards/rejected": -6.534375190734863,
654
  "step": 420
655
  },
656
  {
657
- "epoch": 1.2078651685393258,
658
- "grad_norm": 0.15300235152244568,
659
  "learning_rate": 2.036369404721023e-07,
660
- "logits/chosen": -3.2113280296325684,
661
- "logits/rejected": -2.531054735183716,
662
- "logps/chosen": -46.8828125,
663
- "logps/rejected": -173.1374969482422,
664
- "loss": 0.1599,
665
- "rewards/accuracies": 0.79296875,
666
- "rewards/chosen": -0.21221771836280823,
667
- "rewards/margins": 6.374609470367432,
668
- "rewards/rejected": -6.587109565734863,
669
  "step": 430
670
  },
671
  {
672
- "epoch": 1.2359550561797752,
673
- "grad_norm": 0.6692606806755066,
674
  "learning_rate": 1.9163865903602372e-07,
675
- "logits/chosen": -3.253124952316284,
676
- "logits/rejected": -2.5308594703674316,
677
- "logps/chosen": -42.287498474121094,
678
- "logps/rejected": -169.625,
679
- "loss": 0.1762,
680
- "rewards/accuracies": 0.776562511920929,
681
- "rewards/chosen": -0.029553985223174095,
682
- "rewards/margins": 6.40625,
683
- "rewards/rejected": -6.434765815734863,
684
  "step": 440
685
  },
686
  {
687
- "epoch": 1.2640449438202248,
688
- "grad_norm": 0.2745380103588104,
689
  "learning_rate": 1.7978097518217702e-07,
690
- "logits/chosen": -3.237499952316284,
691
- "logits/rejected": -2.541015625,
692
- "logps/chosen": -42.74687576293945,
693
- "logps/rejected": -168.78750610351562,
694
- "loss": 0.1715,
695
- "rewards/accuracies": 0.774218738079071,
696
- "rewards/chosen": -0.021244239062070847,
697
- "rewards/margins": 6.392187595367432,
698
- "rewards/rejected": -6.409375190734863,
699
  "step": 450
700
  },
701
  {
702
- "epoch": 1.2921348314606742,
703
- "grad_norm": 0.3858237564563751,
704
  "learning_rate": 1.6809245510957666e-07,
705
- "logits/chosen": -3.235546827316284,
706
- "logits/rejected": NaN,
707
- "logps/chosen": -44.904685974121094,
708
- "logps/rejected": -172.96249389648438,
709
- "loss": 0.1663,
710
- "rewards/accuracies": 0.7875000238418579,
711
- "rewards/chosen": -0.10267486423254013,
712
- "rewards/margins": 6.484375,
713
- "rewards/rejected": -6.587109565734863,
714
  "step": 460
715
  },
716
  {
717
- "epoch": 1.3202247191011236,
718
- "grad_norm": 0.25496408343315125,
719
  "learning_rate": 1.5660125748687093e-07,
720
- "logits/chosen": -3.224414110183716,
721
- "logits/rejected": NaN,
722
- "logps/chosen": -46.64374923706055,
723
- "logps/rejected": -171.1374969482422,
724
- "loss": 0.1703,
725
- "rewards/accuracies": 0.780468761920929,
726
- "rewards/chosen": -0.2242431640625,
727
- "rewards/margins": 6.318749904632568,
728
- "rewards/rejected": -6.542187690734863,
729
  "step": 470
730
  },
731
  {
732
- "epoch": 1.348314606741573,
733
- "grad_norm": 0.3984578251838684,
734
  "learning_rate": 1.4533506561564305e-07,
735
- "logits/chosen": -3.224609375,
736
- "logits/rejected": -2.529101610183716,
737
- "logps/chosen": -47.29375076293945,
738
- "logps/rejected": -173.3625030517578,
739
- "loss": 0.1669,
740
- "rewards/accuracies": 0.780468761920929,
741
- "rewards/chosen": -0.2605232298374176,
742
- "rewards/margins": 6.368359565734863,
743
- "rewards/rejected": -6.627734184265137,
744
  "step": 480
745
  },
746
  {
747
- "epoch": 1.3764044943820224,
748
- "grad_norm": 0.39370596408843994,
749
  "learning_rate": 1.343210207389125e-07,
750
- "logits/chosen": -3.203320264816284,
751
- "logits/rejected": -2.5302734375,
752
- "logps/chosen": -48.0859375,
753
- "logps/rejected": -172.93124389648438,
754
- "loss": 0.1747,
755
- "rewards/accuracies": 0.7757812738418579,
756
- "rewards/chosen": -0.27991026639938354,
757
- "rewards/margins": 6.329297065734863,
758
- "rewards/rejected": -6.610156059265137,
759
  "step": 490
760
  },
761
  {
762
- "epoch": 1.404494382022472,
763
- "grad_norm": 0.2013687640428543,
764
  "learning_rate": 1.2358565665550387e-07,
765
- "logits/chosen": -3.244335889816284,
766
  "logits/rejected": NaN,
767
- "logps/chosen": -41.96875,
768
- "logps/rejected": -176.03750610351562,
769
- "loss": 0.1557,
770
- "rewards/accuracies": 0.793749988079071,
771
- "rewards/chosen": 0.01244263630360365,
772
- "rewards/margins": 6.784765720367432,
773
- "rewards/rejected": -6.769921779632568,
774
  "step": 500
775
  },
776
  {
777
- "epoch": 1.4325842696629214,
778
- "grad_norm": 0.24023930728435516,
779
  "learning_rate": 1.1315483579780094e-07,
780
- "logits/chosen": -3.2484374046325684,
781
- "logits/rejected": -2.5347657203674316,
782
- "logps/chosen": -42.046875,
783
- "logps/rejected": -175.8312530517578,
784
- "loss": 0.1643,
785
- "rewards/accuracies": 0.788281261920929,
786
- "rewards/chosen": 0.040112875401973724,
787
- "rewards/margins": 6.779296875,
788
- "rewards/rejected": -6.739453315734863,
789
  "step": 510
790
  },
791
  {
792
- "epoch": 1.4606741573033708,
793
- "grad_norm": 0.5066425800323486,
794
  "learning_rate": 1.0305368692688174e-07,
795
- "logits/chosen": -3.240429639816284,
796
- "logits/rejected": -2.522656202316284,
797
- "logps/chosen": -43.00468826293945,
798
- "logps/rejected": -173.8625030517578,
799
- "loss": 0.1791,
800
- "rewards/accuracies": 0.778124988079071,
801
- "rewards/chosen": -0.03853149339556694,
802
- "rewards/margins": 6.608788967132568,
803
- "rewards/rejected": -6.646874904632568,
804
  "step": 520
805
  },
806
  {
807
- "epoch": 1.4887640449438202,
808
- "grad_norm": 0.10324753075838089,
809
  "learning_rate": 9.330654459513266e-08,
810
- "logits/chosen": -3.233203172683716,
811
- "logits/rejected": -2.5269532203674316,
812
- "logps/chosen": -41.953125,
813
- "logps/rejected": -172.06875610351562,
814
- "loss": 0.1683,
815
- "rewards/accuracies": 0.7749999761581421,
816
- "rewards/chosen": -0.02672729454934597,
817
- "rewards/margins": 6.571875095367432,
818
- "rewards/rejected": -6.598437309265137,
819
  "step": 530
820
  },
821
  {
822
- "epoch": 1.5168539325842696,
823
- "grad_norm": 0.06884948909282684,
824
  "learning_rate": 8.393689052217964e-08,
825
- "logits/chosen": -3.2310547828674316,
826
- "logits/rejected": -2.5240235328674316,
827
- "logps/chosen": -41.89531326293945,
828
- "logps/rejected": -170.4499969482422,
829
- "loss": 0.1774,
830
  "rewards/accuracies": 0.770312488079071,
831
- "rewards/chosen": -0.03569946438074112,
832
- "rewards/margins": 6.483984470367432,
833
- "rewards/rejected": -6.521093845367432,
834
  "step": 540
835
  },
836
  {
837
- "epoch": 1.5449438202247192,
838
- "grad_norm": 0.2964838445186615,
839
  "learning_rate": 7.49672970253691e-08,
840
- "logits/chosen": -3.2562499046325684,
841
- "logits/rejected": -2.5296874046325684,
842
- "logps/chosen": -40.365623474121094,
843
- "logps/rejected": -173.3625030517578,
844
- "loss": 0.1644,
845
- "rewards/accuracies": 0.78515625,
846
- "rewards/chosen": 0.06430435180664062,
847
- "rewards/margins": 6.696484565734863,
848
- "rewards/rejected": -6.630078315734863,
849
  "step": 550
850
  },
851
  {
852
- "epoch": 1.5730337078651684,
853
- "grad_norm": 0.16073837876319885,
854
  "learning_rate": 6.641937264107867e-08,
855
- "logits/chosen": -3.2535157203674316,
856
- "logits/rejected": -2.5425782203674316,
857
- "logps/chosen": -39.43281173706055,
858
- "logps/rejected": -167.91250610351562,
859
- "loss": 0.1788,
860
- "rewards/accuracies": 0.7671874761581421,
861
- "rewards/chosen": 0.08403320610523224,
862
- "rewards/margins": 6.482421875,
863
- "rewards/rejected": -6.396874904632568,
864
  "step": 560
865
  },
866
  {
867
- "epoch": 1.601123595505618,
868
- "grad_norm": 0.27828249335289,
869
  "learning_rate": 5.831371006785962e-08,
870
- "logits/chosen": -3.2699217796325684,
871
- "logits/rejected": -2.5228514671325684,
872
- "logps/chosen": -39.87187576293945,
873
- "logps/rejected": -171.1437530517578,
874
- "loss": 0.175,
875
- "rewards/accuracies": 0.772656261920929,
876
- "rewards/chosen": 0.07085514068603516,
877
- "rewards/margins": 6.617578029632568,
878
- "rewards/rejected": -6.548047065734863,
879
  "step": 570
880
  },
881
  {
882
- "epoch": 1.6292134831460674,
883
- "grad_norm": 0.386394202709198,
884
  "learning_rate": 5.066983655682325e-08,
885
- "logits/chosen": -3.2455077171325684,
886
- "logits/rejected": -2.546093702316284,
887
- "logps/chosen": -40.72968673706055,
888
- "logps/rejected": -168.77499389648438,
889
- "loss": 0.1783,
890
- "rewards/accuracies": 0.7679687738418579,
891
- "rewards/chosen": 0.032080840319395065,
892
- "rewards/margins": 6.479296684265137,
893
- "rewards/rejected": -6.448828220367432,
894
  "step": 580
895
  },
896
  {
897
- "epoch": 1.6573033707865168,
898
- "grad_norm": 0.3948213756084442,
899
  "learning_rate": 4.3506166868781755e-08,
900
- "logits/chosen": -3.2671875953674316,
901
- "logits/rejected": -2.521484375,
902
- "logps/chosen": -41.20624923706055,
903
- "logps/rejected": -172.6875,
904
- "loss": 0.1759,
905
- "rewards/accuracies": 0.76953125,
906
- "rewards/chosen": 0.012204742059111595,
907
- "rewards/margins": 6.639843940734863,
908
- "rewards/rejected": -6.629296779632568,
909
  "step": 590
910
  },
911
  {
912
- "epoch": 1.6853932584269664,
913
- "grad_norm": 0.4554503560066223,
914
  "learning_rate": 3.683995891147695e-08,
915
- "logits/chosen": -3.246289014816284,
916
  "logits/rejected": NaN,
917
- "logps/chosen": -42.18281173706055,
918
- "logps/rejected": -173.86874389648438,
919
- "loss": 0.1625,
920
  "rewards/accuracies": 0.784375011920929,
921
- "rewards/chosen": 0.0017807006370276213,
922
- "rewards/margins": 6.689453125,
923
- "rewards/rejected": -6.688672065734863,
924
  "step": 600
925
  },
926
  {
927
- "epoch": 1.7134831460674156,
928
- "grad_norm": 0.6775197386741638,
929
  "learning_rate": 3.0687272163768986e-08,
930
- "logits/chosen": -3.259960889816284,
931
- "logits/rejected": NaN,
932
- "logps/chosen": -41.376564025878906,
933
- "logps/rejected": -174.4499969482422,
934
- "loss": 0.1619,
935
- "rewards/accuracies": 0.788281261920929,
936
- "rewards/chosen": 0.07793807983398438,
937
- "rewards/margins": 6.741796970367432,
938
- "rewards/rejected": -6.6640625,
939
  "step": 610
940
  },
941
  {
942
- "epoch": 1.7415730337078652,
943
- "grad_norm": 0.11740182340145111,
944
  "learning_rate": 2.5062928986944676e-08,
945
- "logits/chosen": -3.2728514671325684,
946
- "logits/rejected": NaN,
947
- "logps/chosen": -40.89374923706055,
948
- "logps/rejected": -175.0625,
949
- "loss": 0.1611,
950
- "rewards/accuracies": 0.7835937738418579,
951
- "rewards/chosen": 0.07378844916820526,
952
- "rewards/margins": 6.797656059265137,
953
- "rewards/rejected": -6.723828315734863,
954
  "step": 620
955
  },
956
  {
957
- "epoch": 1.7696629213483146,
958
- "grad_norm": 0.23929810523986816,
959
  "learning_rate": 1.9980478916351296e-08,
960
- "logits/chosen": -3.2544922828674316,
961
- "logits/rejected": -2.5478515625,
962
- "logps/chosen": -42.296875,
963
- "logps/rejected": -174.1374969482422,
964
- "loss": 0.1689,
965
- "rewards/accuracies": 0.785937488079071,
966
- "rewards/chosen": -0.01357345562428236,
967
- "rewards/margins": 6.648046970367432,
968
- "rewards/rejected": -6.661328315734863,
969
  "step": 630
970
  },
971
  {
972
- "epoch": 1.797752808988764,
973
- "grad_norm": 0.06252361834049225,
974
  "learning_rate": 1.5452166019378987e-08,
975
- "logits/chosen": -3.2582030296325684,
976
- "logits/rejected": -2.5250000953674316,
977
- "logps/chosen": -39.837501525878906,
978
- "logps/rejected": -173.9875030517578,
979
- "loss": 0.1693,
980
- "rewards/accuracies": 0.776562511920929,
981
- "rewards/chosen": 0.06287650763988495,
982
- "rewards/margins": 6.756249904632568,
983
- "rewards/rejected": -6.696875095367432,
984
  "step": 640
985
  },
986
  {
987
- "epoch": 1.8258426966292136,
988
- "grad_norm": 0.6466670632362366,
989
  "learning_rate": 1.1488899398429896e-08,
990
- "logits/chosen": -3.2587890625,
991
- "logits/rejected": -2.5443358421325684,
992
- "logps/chosen": -41.12812423706055,
993
- "logps/rejected": -175.3125,
994
- "loss": 0.1643,
995
- "rewards/accuracies": 0.793749988079071,
996
- "rewards/chosen": 0.057323455810546875,
997
- "rewards/margins": 6.766015529632568,
998
- "rewards/rejected": -6.710156440734863,
999
  "step": 650
1000
  },
1001
  {
1002
- "epoch": 1.8539325842696628,
1003
- "grad_norm": 0.364284485578537,
1004
  "learning_rate": 8.100226909935059e-09,
1005
- "logits/chosen": -3.2474608421325684,
1006
- "logits/rejected": -2.5396485328674316,
1007
- "logps/chosen": -43.040626525878906,
1008
- "logps/rejected": -175.02499389648438,
1009
- "loss": 0.1656,
1010
- "rewards/accuracies": 0.7906249761581421,
1011
- "rewards/chosen": -0.026231002062559128,
1012
- "rewards/margins": 6.670312404632568,
1013
- "rewards/rejected": -6.696093559265137,
1014
  "step": 660
1015
  },
1016
  {
1017
- "epoch": 1.8820224719101124,
1018
- "grad_norm": 0.1942785233259201,
1019
  "learning_rate": 5.2943121627319346e-09,
1020
- "logits/chosen": -3.252734422683716,
1021
- "logits/rejected": -2.544140577316284,
1022
- "logps/chosen": -41.951560974121094,
1023
- "logps/rejected": -170.24374389648438,
1024
- "loss": 0.1726,
1025
  "rewards/accuracies": 0.770312488079071,
1026
- "rewards/chosen": -0.00887908972799778,
1027
- "rewards/margins": 6.493359565734863,
1028
- "rewards/rejected": -6.500390529632568,
1029
  "step": 670
1030
  },
1031
  {
1032
- "epoch": 1.9101123595505618,
1033
- "grad_norm": 0.38263440132141113,
1034
  "learning_rate": 3.077914851215585e-09,
1035
- "logits/chosen": -3.257031202316284,
1036
- "logits/rejected": -2.5390625,
1037
- "logps/chosen": -41.865623474121094,
1038
- "logps/rejected": -176.53750610351562,
1039
- "loss": 0.1582,
1040
- "rewards/accuracies": 0.796093761920929,
1041
- "rewards/chosen": 0.024802017956972122,
1042
- "rewards/margins": 6.796093940734863,
1043
- "rewards/rejected": -6.771484375,
1044
  "step": 680
1045
  },
1046
  {
1047
- "epoch": 1.9382022471910112,
1048
- "grad_norm": 0.4378679096698761,
1049
  "learning_rate": 1.4563744706429514e-09,
1050
- "logits/chosen": -3.2578125,
1051
- "logits/rejected": -2.5419921875,
1052
- "logps/chosen": -40.009376525878906,
1053
- "logps/rejected": -171.91250610351562,
1054
- "loss": 0.1703,
1055
- "rewards/accuracies": 0.7749999761581421,
1056
- "rewards/chosen": 0.07464599609375,
1057
- "rewards/margins": 6.642968654632568,
1058
- "rewards/rejected": -6.567968845367432,
1059
  "step": 690
1060
  },
1061
  {
1062
- "epoch": 1.9662921348314608,
1063
- "grad_norm": 0.19188769161701202,
1064
  "learning_rate": 4.3359745382104405e-10,
1065
- "logits/chosen": -3.271679639816284,
1066
- "logits/rejected": -2.5396485328674316,
1067
- "logps/chosen": -41.52031326293945,
1068
- "logps/rejected": -175.375,
1069
- "loss": 0.1601,
1070
- "rewards/accuracies": 0.7906249761581421,
1071
- "rewards/chosen": 0.01084976177662611,
1072
- "rewards/margins": 6.757421970367432,
1073
- "rewards/rejected": -6.750390529632568,
1074
  "step": 700
1075
  },
1076
  {
1077
- "epoch": 1.99438202247191,
1078
- "grad_norm": 0.2792583107948303,
1079
  "learning_rate": 1.2047760167999133e-11,
1080
- "logits/chosen": -3.2544922828674316,
1081
- "logits/rejected": -2.5517578125,
1082
- "logps/chosen": -42.29999923706055,
1083
- "logps/rejected": -175.3625030517578,
1084
- "loss": 0.1594,
1085
- "rewards/accuracies": 0.7867187261581421,
1086
- "rewards/chosen": -0.014897918328642845,
1087
- "rewards/margins": 6.701171875,
1088
- "rewards/rejected": -6.717187404632568,
1089
  "step": 710
1090
  },
1091
  {
1092
- "epoch": 2.0,
1093
  "step": 712,
1094
  "total_flos": 0.0,
1095
- "train_loss": 0.22515048312672067,
1096
- "train_runtime": 2958.2669,
1097
- "train_samples_per_second": 30.802,
1098
- "train_steps_per_second": 0.241
1099
  }
1100
  ],
1101
  "logging_steps": 10,
1102
  "max_steps": 712,
1103
  "num_input_tokens_seen": 0,
1104
- "num_train_epochs": 2,
1105
  "save_steps": 500,
1106
  "stateful_callbacks": {
1107
  "TrainerControl": {
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
  "eval_steps": 500,
6
  "global_step": 712,
7
  "is_hyper_param_search": false,
 
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0014044943820224719,
13
+ "grad_norm": 0.9453608393669128,
14
  "learning_rate": 6.9444444444444435e-09,
15
+ "logits/chosen": -3.205078125,
16
+ "logits/rejected": -3.185546875,
17
+ "logps/chosen": -43.59375,
18
+ "logps/rejected": -42.640625,
19
  "loss": 0.6914,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.014044943820224719,
28
+ "grad_norm": 1.151463508605957,
29
  "learning_rate": 6.944444444444444e-08,
30
+ "logits/chosen": -3.24609375,
31
+ "logits/rejected": -3.196831703186035,
32
+ "logps/chosen": -42.70138931274414,
33
+ "logps/rejected": -41.57638931274414,
34
+ "loss": 0.6923,
35
+ "rewards/accuracies": 0.2465277761220932,
36
+ "rewards/chosen": 9.75396906142123e-05,
37
+ "rewards/margins": -0.00013128916907589883,
38
+ "rewards/rejected": 0.00022856394934933633,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.028089887640449437,
43
+ "grad_norm": 0.5042410492897034,
44
  "learning_rate": 1.3888888888888888e-07,
45
+ "logits/chosen": -3.255859375,
46
+ "logits/rejected": -3.21484375,
47
+ "logps/chosen": -42.06718826293945,
48
+ "logps/rejected": -40.62968826293945,
49
+ "loss": 0.6911,
50
+ "rewards/accuracies": 0.30937498807907104,
51
+ "rewards/chosen": 0.006805038545280695,
52
+ "rewards/margins": 0.0023165703751146793,
53
+ "rewards/rejected": 0.004488563630729914,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.042134831460674156,
58
+ "grad_norm": 0.6343653202056885,
59
  "learning_rate": 2.0833333333333333e-07,
60
+ "logits/chosen": -3.262500047683716,
61
+ "logits/rejected": -3.232617139816284,
62
+ "logps/chosen": -41.40625,
63
+ "logps/rejected": -40.610939025878906,
64
+ "loss": 0.6869,
65
+ "rewards/accuracies": 0.4468750059604645,
66
+ "rewards/chosen": 0.027604103088378906,
67
+ "rewards/margins": 0.012437248602509499,
68
+ "rewards/rejected": 0.015170956030488014,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.056179775280898875,
73
+ "grad_norm": 0.7174843549728394,
74
  "learning_rate": 2.7777777777777776e-07,
75
+ "logits/chosen": -3.279296875,
76
+ "logits/rejected": -3.2447266578674316,
77
+ "logps/chosen": -40.29218673706055,
78
+ "logps/rejected": -39.842185974121094,
79
+ "loss": 0.68,
80
+ "rewards/accuracies": 0.5078125,
81
+ "rewards/chosen": 0.03814506530761719,
82
+ "rewards/margins": 0.027390670031309128,
83
+ "rewards/rejected": 0.010743332095444202,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.0702247191011236,
88
+ "grad_norm": 0.5838291645050049,
89
  "learning_rate": 3.472222222222222e-07,
90
+ "logits/chosen": -3.2763671875,
91
+ "logits/rejected": -3.230664014816284,
92
+ "logps/chosen": -40.84687423706055,
93
+ "logps/rejected": -40.31562423706055,
94
+ "loss": 0.6619,
95
+ "rewards/accuracies": 0.609375,
96
+ "rewards/chosen": 0.041875459253787994,
97
+ "rewards/margins": 0.06587791442871094,
98
+ "rewards/rejected": -0.023966407403349876,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.08426966292134831,
103
+ "grad_norm": 0.8917225003242493,
104
  "learning_rate": 4.1666666666666667e-07,
105
+ "logits/chosen": -3.2505860328674316,
106
+ "logits/rejected": -3.1898436546325684,
107
+ "logps/chosen": -41.443748474121094,
108
+ "logps/rejected": -43.939064025878906,
109
+ "loss": 0.6155,
110
+ "rewards/accuracies": 0.6890624761581421,
111
+ "rewards/chosen": 0.011277198791503906,
112
+ "rewards/margins": 0.17209243774414062,
113
+ "rewards/rejected": -0.1607826203107834,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.09831460674157304,
118
+ "grad_norm": 1.416306734085083,
119
  "learning_rate": 4.861111111111111e-07,
120
+ "logits/chosen": -3.2109375,
121
+ "logits/rejected": -3.1263670921325684,
122
+ "logps/chosen": -46.9375,
123
+ "logps/rejected": -53.76874923706055,
124
+ "loss": 0.5336,
125
+ "rewards/accuracies": 0.7015625238418579,
126
+ "rewards/chosen": -0.21380920708179474,
127
+ "rewards/margins": 0.407858282327652,
128
+ "rewards/rejected": -0.6218963861465454,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.11235955056179775,
133
+ "grad_norm": 0.6724388599395752,
134
  "learning_rate": 4.998072590601808e-07,
135
+ "logits/chosen": -3.2593750953674316,
136
+ "logits/rejected": -3.081835985183716,
137
+ "logps/chosen": -44.157814025878906,
138
+ "logps/rejected": -63.279685974121094,
139
+ "loss": 0.405,
140
+ "rewards/accuracies": 0.7203124761581421,
141
+ "rewards/chosen": -0.117925263941288,
142
+ "rewards/margins": 1.0123169422149658,
143
+ "rewards/rejected": -1.1299316883087158,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.12640449438202248,
148
+ "grad_norm": 0.8954005837440491,
149
  "learning_rate": 4.990247583129217e-07,
150
+ "logits/chosen": -3.230273485183716,
151
+ "logits/rejected": -2.9839844703674316,
152
+ "logps/chosen": -45.54218673706055,
153
+ "logps/rejected": -78.46562194824219,
154
+ "loss": 0.3109,
155
+ "rewards/accuracies": 0.7515624761581421,
156
+ "rewards/chosen": -0.15528163313865662,
157
+ "rewards/margins": 1.7075684070587158,
158
+ "rewards/rejected": -1.863037109375,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.1404494382022472,
163
+ "grad_norm": 0.5155877470970154,
164
  "learning_rate": 4.976423351108942e-07,
165
+ "logits/chosen": -3.2529296875,
166
  "logits/rejected": NaN,
167
+ "logps/chosen": -48.826560974121094,
168
+ "logps/rejected": -96.4312515258789,
169
+ "loss": 0.2579,
170
+ "rewards/accuracies": 0.7562500238418579,
171
+ "rewards/chosen": -0.3163391053676605,
172
+ "rewards/margins": 2.467578172683716,
173
+ "rewards/rejected": -2.783496141433716,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.1544943820224719,
178
+ "grad_norm": 0.9068161249160767,
179
  "learning_rate": 4.95663319832678e-07,
180
+ "logits/chosen": -3.236328125,
181
+ "logits/rejected": -2.843554735183716,
182
+ "logps/chosen": -45.657814025878906,
183
+ "logps/rejected": -107.41874694824219,
184
+ "loss": 0.2278,
185
+ "rewards/accuracies": 0.753125011920929,
186
+ "rewards/chosen": -0.1908990889787674,
187
+ "rewards/margins": 3.155956983566284,
188
+ "rewards/rejected": -3.34814453125,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.16853932584269662,
193
+ "grad_norm": 0.5081749558448792,
194
  "learning_rate": 4.930924800994191e-07,
195
+ "logits/chosen": -3.252734422683716,
196
+ "logits/rejected": -2.847460985183716,
197
+ "logps/chosen": -46.87812423706055,
198
+ "logps/rejected": -118.3499984741211,
199
+ "loss": 0.2113,
200
+ "rewards/accuracies": 0.776562511920929,
201
+ "rewards/chosen": -0.22010573744773865,
202
+ "rewards/margins": 3.623046875,
203
+ "rewards/rejected": -3.8433594703674316,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.18258426966292135,
208
+ "grad_norm": 0.6637352108955383,
209
  "learning_rate": 4.899360092892144e-07,
210
+ "logits/chosen": -3.2525391578674316,
211
+ "logits/rejected": -2.763671875,
212
+ "logps/chosen": -47.01093673706055,
213
+ "logps/rejected": -124.7437515258789,
214
+ "loss": 0.2106,
215
+ "rewards/accuracies": 0.7671874761581421,
216
+ "rewards/chosen": -0.23588410019874573,
217
+ "rewards/margins": 3.9537110328674316,
218
+ "rewards/rejected": -4.188086032867432,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.19662921348314608,
223
+ "grad_norm": 0.3586008548736572,
224
  "learning_rate": 4.862015116167195e-07,
225
+ "logits/chosen": -3.271484375,
226
+ "logits/rejected": -2.750781297683716,
227
+ "logps/chosen": -42.41093826293945,
228
+ "logps/rejected": -132.1125030517578,
229
+ "loss": 0.182,
230
+ "rewards/accuracies": 0.785937488079071,
231
+ "rewards/chosen": 0.008197021670639515,
232
+ "rewards/margins": 4.537890434265137,
233
+ "rewards/rejected": -4.529687404632568,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.21067415730337077,
238
+ "grad_norm": 0.7338384985923767,
239
  "learning_rate": 4.81897983813931e-07,
240
+ "logits/chosen": -3.2681641578674316,
241
+ "logits/rejected": NaN,
242
+ "logps/chosen": -39.69062423706055,
243
+ "logps/rejected": -129.3562469482422,
244
+ "loss": 0.1964,
245
+ "rewards/accuracies": 0.7593749761581421,
246
+ "rewards/chosen": 0.10731048882007599,
247
+ "rewards/margins": 4.583203315734863,
248
+ "rewards/rejected": -4.477246284484863,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.2247191011235955,
253
+ "grad_norm": 0.7118776440620422,
254
  "learning_rate": 4.770357934562704e-07,
255
+ "logits/chosen": -3.279296875,
256
+ "logits/rejected": -2.6996092796325684,
257
+ "logps/chosen": -44.157814025878906,
258
+ "logps/rejected": -138.63125610351562,
259
+ "loss": 0.1941,
260
+ "rewards/accuracies": 0.7828124761581421,
261
+ "rewards/chosen": -0.10118408501148224,
262
+ "rewards/margins": 4.762890815734863,
263
+ "rewards/rejected": -4.8642578125,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.23876404494382023,
268
+ "grad_norm": 0.20937258005142212,
269
  "learning_rate": 4.716266539861866e-07,
270
+ "logits/chosen": -3.1851563453674316,
271
+ "logits/rejected": -2.635546922683716,
272
+ "logps/chosen": -51.939064025878906,
273
+ "logps/rejected": -140.15625,
274
+ "loss": 0.1944,
275
+ "rewards/accuracies": 0.7749999761581421,
276
+ "rewards/chosen": -0.5132009387016296,
277
+ "rewards/margins": 4.476171970367432,
278
+ "rewards/rejected": -4.986914157867432,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.25280898876404495,
283
+ "grad_norm": 1.1107814311981201,
284
  "learning_rate": 4.6568359649444796e-07,
285
+ "logits/chosen": -3.2699217796325684,
286
+ "logits/rejected": -2.663281202316284,
287
+ "logps/chosen": -37.44843673706055,
288
+ "logps/rejected": -139.5437469482422,
289
+ "loss": 0.1877,
290
+ "rewards/accuracies": 0.768750011920929,
291
+ "rewards/chosen": 0.19300994277000427,
292
+ "rewards/margins": 5.172265529632568,
293
+ "rewards/rejected": -4.980273246765137,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.26685393258426965,
298
+ "grad_norm": 0.1760079711675644,
299
  "learning_rate": 4.592209383271023e-07,
300
+ "logits/chosen": -3.15234375,
301
+ "logits/rejected": NaN,
302
+ "logps/chosen": -50.7109375,
303
+ "logps/rejected": -142.33749389648438,
304
+ "loss": 0.2011,
305
+ "rewards/accuracies": 0.753125011920929,
306
+ "rewards/chosen": -0.4693801999092102,
307
+ "rewards/margins": 4.672265529632568,
308
+ "rewards/rejected": -5.140820503234863,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.2808988764044944,
313
+ "grad_norm": 0.3540495038032532,
314
  "learning_rate": 4.5225424859373684e-07,
315
+ "logits/chosen": -3.2144532203674316,
316
+ "logits/rejected": -2.646484375,
317
+ "logps/chosen": -44.23749923706055,
318
+ "logps/rejected": -146.5,
319
+ "loss": 0.1823,
320
+ "rewards/accuracies": 0.7749999761581421,
321
+ "rewards/chosen": -0.061879731714725494,
322
+ "rewards/margins": 5.176171779632568,
323
+ "rewards/rejected": -5.240038871765137,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.2949438202247191,
328
+ "grad_norm": 0.4451713263988495,
329
  "learning_rate": 4.448003106601291e-07,
330
+ "logits/chosen": -3.2412109375,
331
+ "logits/rejected": -2.6431641578674316,
332
+ "logps/chosen": -39.69843673706055,
333
+ "logps/rejected": -141.1062469482422,
334
+ "loss": 0.2101,
335
+ "rewards/accuracies": 0.7671874761581421,
336
+ "rewards/chosen": 0.11627502739429474,
337
+ "rewards/margins": 5.123827934265137,
338
+ "rewards/rejected": -5.008593559265137,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.3089887640449438,
343
+ "grad_norm": 1.5930163860321045,
344
  "learning_rate": 4.3687708171564917e-07,
345
+ "logits/chosen": -3.2291016578674316,
346
+ "logits/rejected": -2.604296922683716,
347
+ "logps/chosen": -42.740623474121094,
348
+ "logps/rejected": -145.375,
349
+ "loss": 0.2042,
350
+ "rewards/accuracies": 0.7578125,
351
+ "rewards/chosen": -0.070429228246212,
352
+ "rewards/margins": 5.188672065734863,
353
+ "rewards/rejected": -5.260156154632568,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.32303370786516855,
358
+ "grad_norm": 0.27790066599845886,
359
  "learning_rate": 4.2850364951281705e-07,
360
+ "logits/chosen": -3.2845702171325684,
361
+ "logits/rejected": -2.6654295921325684,
362
+ "logps/chosen": -39.85625076293945,
363
+ "logps/rejected": -149.9250030517578,
364
+ "loss": 0.1742,
365
+ "rewards/accuracies": 0.778124988079071,
366
+ "rewards/chosen": 0.12183837592601776,
367
+ "rewards/margins": 5.572656154632568,
368
+ "rewards/rejected": -5.451562404632568,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.33707865168539325,
373
+ "grad_norm": 0.32155928015708923,
374
  "learning_rate": 4.1970018638323547e-07,
375
+ "logits/chosen": -3.26953125,
376
+ "logits/rejected": -2.6566405296325684,
377
+ "logps/chosen": -41.8671875,
378
+ "logps/rejected": -152.64999389648438,
379
+ "loss": 0.174,
380
+ "rewards/accuracies": 0.7875000238418579,
381
+ "rewards/chosen": 0.051790811121463776,
382
+ "rewards/margins": 5.608593940734863,
383
+ "rewards/rejected": -5.559179782867432,
384
  "step": 240
385
  },
386
  {
387
+ "epoch": 0.351123595505618,
388
+ "grad_norm": 0.2504253685474396,
389
  "learning_rate": 4.1048790064067573e-07,
390
+ "logits/chosen": -3.254687547683716,
391
+ "logits/rejected": -2.630078077316284,
392
+ "logps/chosen": -40.02812576293945,
393
+ "logps/rejected": -154.9250030517578,
394
+ "loss": 0.1722,
395
+ "rewards/accuracies": 0.785937488079071,
396
+ "rewards/chosen": 0.126708984375,
397
+ "rewards/margins": 5.812109470367432,
398
+ "rewards/rejected": -5.681640625,
399
  "step": 250
400
  },
401
  {
402
+ "epoch": 0.3651685393258427,
403
+ "grad_norm": 1.5456242561340332,
404
  "learning_rate": 4.0088898548839285e-07,
405
+ "logits/chosen": -3.2398438453674316,
406
+ "logits/rejected": -2.6322264671325684,
407
+ "logps/chosen": -42.142189025878906,
408
+ "logps/rejected": -149.5749969482422,
409
+ "loss": 0.1996,
410
+ "rewards/accuracies": 0.7562500238418579,
411
+ "rewards/chosen": -0.08585663139820099,
412
+ "rewards/margins": 5.431640625,
413
+ "rewards/rejected": -5.516211032867432,
414
  "step": 260
415
  },
416
  {
417
+ "epoch": 0.3792134831460674,
418
+ "grad_norm": 0.1563502699136734,
419
  "learning_rate": 3.9092656555375414e-07,
420
+ "logits/chosen": -3.268359422683716,
421
+ "logits/rejected": -2.6371092796325684,
422
+ "logps/chosen": -41.34375,
423
+ "logps/rejected": -160.625,
424
+ "loss": 0.159,
425
+ "rewards/accuracies": 0.796875,
426
+ "rewards/chosen": 0.07463989406824112,
427
+ "rewards/margins": 6.014843940734863,
428
+ "rewards/rejected": -5.940234184265137,
429
  "step": 270
430
  },
431
  {
432
+ "epoch": 0.39325842696629215,
433
+ "grad_norm": 0.414302796125412,
434
  "learning_rate": 3.806246411789872e-07,
435
+ "logits/chosen": -3.282421827316284,
436
+ "logits/rejected": NaN,
437
+ "logps/chosen": -39.970314025878906,
438
+ "logps/rejected": -151.84375,
439
+ "loss": 0.1905,
440
+ "rewards/accuracies": 0.7640625238418579,
441
+ "rewards/chosen": 0.1959686279296875,
442
+ "rewards/margins": 5.774609565734863,
443
+ "rewards/rejected": -5.579492092132568,
444
  "step": 280
445
  },
446
  {
447
+ "epoch": 0.40730337078651685,
448
+ "grad_norm": 0.11994462460279465,
449
  "learning_rate": 3.700080306022528e-07,
450
+ "logits/chosen": -3.199023485183716,
451
+ "logits/rejected": -2.582226514816284,
452
+ "logps/chosen": -51.41093826293945,
453
+ "logps/rejected": -164.3125,
454
+ "loss": 0.1673,
455
+ "rewards/accuracies": 0.7890625,
456
+ "rewards/chosen": -0.46795958280563354,
457
+ "rewards/margins": 5.684179782867432,
458
+ "rewards/rejected": -6.149609565734863,
459
  "step": 290
460
  },
461
  {
462
+ "epoch": 0.42134831460674155,
463
+ "grad_norm": 0.3142966330051422,
464
  "learning_rate": 3.5910231016833546e-07,
465
+ "logits/chosen": -3.1810545921325684,
466
+ "logits/rejected": -2.5703125,
467
+ "logps/chosen": -50.90625,
468
+ "logps/rejected": -161.7375030517578,
469
+ "loss": 0.1775,
470
+ "rewards/accuracies": 0.7796875238418579,
471
+ "rewards/chosen": -0.4352920651435852,
472
+ "rewards/margins": 5.612890720367432,
473
+ "rewards/rejected": -6.050000190734863,
474
  "step": 300
475
  },
476
  {
477
+ "epoch": 0.4353932584269663,
478
+ "grad_norm": 0.5336220264434814,
479
  "learning_rate": 3.4793375271298895e-07,
480
+ "logits/chosen": -3.252148389816284,
481
+ "logits/rejected": NaN,
482
+ "logps/chosen": -41.959373474121094,
483
+ "logps/rejected": -157.24374389648438,
484
+ "loss": 0.1855,
485
+ "rewards/accuracies": 0.768750011920929,
486
+ "rewards/chosen": 0.02679443359375,
487
+ "rewards/margins": 5.8720703125,
488
+ "rewards/rejected": -5.84375,
489
  "step": 310
490
  },
491
  {
492
+ "epoch": 0.449438202247191,
493
+ "grad_norm": 0.11426942050457001,
494
  "learning_rate": 3.3652926426937325e-07,
495
+ "logits/chosen": -3.280078172683716,
496
+ "logits/rejected": -2.6470704078674316,
497
+ "logps/chosen": -40.0546875,
498
+ "logps/rejected": -156.4812469482422,
499
+ "loss": 0.1744,
500
+ "rewards/accuracies": 0.770312488079071,
501
+ "rewards/chosen": 0.06229095533490181,
502
+ "rewards/margins": 5.884179592132568,
503
+ "rewards/rejected": -5.8212890625,
504
  "step": 320
505
  },
506
  {
507
+ "epoch": 0.46348314606741575,
508
+ "grad_norm": 0.12852512300014496,
509
  "learning_rate": 3.249163192490642e-07,
510
+ "logits/chosen": -3.245898485183716,
511
+ "logits/rejected": -2.5863280296325684,
512
+ "logps/chosen": -43.52031326293945,
513
+ "logps/rejected": -163.125,
514
+ "loss": 0.1654,
515
+ "rewards/accuracies": 0.785937488079071,
516
+ "rewards/chosen": -0.09057464450597763,
517
+ "rewards/margins": 6.042578220367432,
518
+ "rewards/rejected": -6.135937690734863,
519
  "step": 330
520
  },
521
  {
522
+ "epoch": 0.47752808988764045,
523
+ "grad_norm": 1.0368801355361938,
524
  "learning_rate": 3.1312289425378944e-07,
525
+ "logits/chosen": -3.237109422683716,
526
+ "logits/rejected": -2.5757813453674316,
527
+ "logps/chosen": -45.65156173706055,
528
+ "logps/rejected": -166.43124389648438,
529
+ "loss": 0.1851,
530
+ "rewards/accuracies": 0.7718750238418579,
531
+ "rewards/chosen": -0.196772962808609,
532
+ "rewards/margins": 6.102734565734863,
533
+ "rewards/rejected": -6.302343845367432,
534
  "step": 340
535
  },
536
  {
537
+ "epoch": 0.49157303370786515,
538
+ "grad_norm": 0.2768089473247528,
539
  "learning_rate": 3.011774006773449e-07,
540
+ "logits/chosen": -3.1851563453674316,
541
+ "logits/rejected": -2.553515672683716,
542
+ "logps/chosen": -50.12968826293945,
543
+ "logps/rejected": -172.3125,
544
+ "loss": 0.1616,
545
+ "rewards/accuracies": 0.801562488079071,
546
+ "rewards/chosen": -0.33214110136032104,
547
+ "rewards/margins": 6.178515434265137,
548
+ "rewards/rejected": -6.510156154632568,
549
  "step": 350
550
  },
551
  {
552
+ "epoch": 0.5056179775280899,
553
+ "grad_norm": 0.5607307553291321,
554
  "learning_rate": 2.8910861626005773e-07,
555
+ "logits/chosen": -3.2269530296325684,
556
+ "logits/rejected": -2.5611329078674316,
557
+ "logps/chosen": -45.09375,
558
+ "logps/rejected": -164.16250610351562,
559
+ "loss": 0.1745,
560
+ "rewards/accuracies": 0.7718750238418579,
561
+ "rewards/chosen": -0.21467895805835724,
562
+ "rewards/margins": 6.003515720367432,
563
+ "rewards/rejected": -6.216406345367432,
564
  "step": 360
565
  },
566
  {
567
+ "epoch": 0.5196629213483146,
568
+ "grad_norm": 0.45973560214042664,
569
  "learning_rate": 2.7694561576068983e-07,
570
+ "logits/chosen": -3.203906297683716,
571
+ "logits/rejected": -2.5244140625,
572
+ "logps/chosen": -46.931251525878906,
573
+ "logps/rejected": -171.2375030517578,
574
+ "loss": 0.1627,
575
+ "rewards/accuracies": 0.796875,
576
+ "rewards/chosen": -0.20502586662769318,
577
+ "rewards/margins": 6.284570217132568,
578
+ "rewards/rejected": -6.489062309265137,
579
  "step": 370
580
  },
581
  {
582
+ "epoch": 0.5337078651685393,
583
+ "grad_norm": 0.33313286304473877,
584
  "learning_rate": 2.647177009127972e-07,
585
+ "logits/chosen": -3.1996092796325684,
586
+ "logits/rejected": -2.5083985328674316,
587
+ "logps/chosen": -46.064064025878906,
588
+ "logps/rejected": -171.14999389648438,
589
+ "loss": 0.1645,
590
+ "rewards/accuracies": 0.7984374761581421,
591
+ "rewards/chosen": -0.1926528960466385,
592
+ "rewards/margins": 6.312890529632568,
593
+ "rewards/rejected": -6.504101753234863,
594
  "step": 380
595
  },
596
  {
597
+ "epoch": 0.547752808988764,
598
+ "grad_norm": 0.3406722843647003,
599
  "learning_rate": 2.524543298342874e-07,
600
+ "logits/chosen": -3.216992139816284,
601
+ "logits/rejected": -2.5326170921325684,
602
+ "logps/chosen": -43.662498474121094,
603
+ "logps/rejected": -168.7937469482422,
604
+ "loss": 0.1589,
605
+ "rewards/accuracies": 0.792187511920929,
606
+ "rewards/chosen": -0.07064209133386612,
607
+ "rewards/margins": 6.324023246765137,
608
+ "rewards/rejected": -6.3935546875,
609
  "step": 390
610
  },
611
  {
612
+ "epoch": 0.5617977528089888,
613
+ "grad_norm": 0.2776348292827606,
614
  "learning_rate": 2.401850460602329e-07,
615
+ "logits/chosen": -3.2457032203674316,
616
+ "logits/rejected": -2.5503907203674316,
617
+ "logps/chosen": -41.400001525878906,
618
+ "logps/rejected": -166.88125610351562,
619
+ "loss": 0.1747,
620
+ "rewards/accuracies": 0.7828124761581421,
621
+ "rewards/chosen": 0.02814788743853569,
622
+ "rewards/margins": 6.317187309265137,
623
+ "rewards/rejected": -6.288281440734863,
624
  "step": 400
625
  },
626
  {
627
+ "epoch": 0.5758426966292135,
628
+ "grad_norm": 1.0503939390182495,
629
  "learning_rate": 2.2793940736990766e-07,
630
+ "logits/chosen": -3.241992235183716,
631
+ "logits/rejected": -2.546093702316284,
632
+ "logps/chosen": -40.43437576293945,
633
+ "logps/rejected": -163.3874969482422,
634
+ "loss": 0.1881,
635
+ "rewards/accuracies": 0.7718750238418579,
636
+ "rewards/chosen": 0.005574035458266735,
637
+ "rewards/margins": 6.197851657867432,
638
+ "rewards/rejected": -6.190625190734863,
639
  "step": 410
640
  },
641
  {
642
+ "epoch": 0.5898876404494382,
643
+ "grad_norm": 1.4323946237564087,
644
  "learning_rate": 2.1574691457950803e-07,
645
+ "logits/chosen": -3.208203077316284,
646
  "logits/rejected": NaN,
647
+ "logps/chosen": -43.95781326293945,
648
+ "logps/rejected": -171.10000610351562,
649
+ "loss": 0.1664,
650
+ "rewards/accuracies": 0.7890625,
651
+ "rewards/chosen": -0.07436218112707138,
652
+ "rewards/margins": 6.463086128234863,
653
+ "rewards/rejected": -6.539453029632568,
654
  "step": 420
655
  },
656
  {
657
+ "epoch": 0.6039325842696629,
658
+ "grad_norm": 0.678728461265564,
659
  "learning_rate": 2.036369404721023e-07,
660
+ "logits/chosen": -3.1888670921325684,
661
+ "logits/rejected": NaN,
662
+ "logps/chosen": -47.20624923706055,
663
+ "logps/rejected": -169.1062469482422,
664
+ "loss": 0.166,
665
+ "rewards/accuracies": 0.778124988079071,
666
+ "rewards/chosen": -0.290365606546402,
667
+ "rewards/margins": 6.1806640625,
668
+ "rewards/rejected": -6.469531059265137,
669
  "step": 430
670
  },
671
  {
672
+ "epoch": 0.6179775280898876,
673
+ "grad_norm": 0.20119936764240265,
674
  "learning_rate": 1.9163865903602372e-07,
675
+ "logits/chosen": -3.2255859375,
676
+ "logits/rejected": -2.520703077316284,
677
+ "logps/chosen": -45.240623474121094,
678
+ "logps/rejected": -167.2624969482422,
679
+ "loss": 0.1816,
680
+ "rewards/accuracies": 0.7671874761581421,
681
+ "rewards/chosen": -0.21324768662452698,
682
+ "rewards/margins": 6.177538871765137,
683
+ "rewards/rejected": -6.390625,
684
  "step": 440
685
  },
686
  {
687
+ "epoch": 0.6320224719101124,
688
+ "grad_norm": 0.7942313551902771,
689
  "learning_rate": 1.7978097518217702e-07,
690
+ "logits/chosen": -3.1763672828674316,
691
+ "logits/rejected": -2.5126953125,
692
+ "logps/chosen": -49.80937576293945,
693
+ "logps/rejected": -172.6374969482422,
694
+ "loss": 0.1685,
695
+ "rewards/accuracies": 0.796875,
696
+ "rewards/chosen": -0.3480590879917145,
697
+ "rewards/margins": 6.218359470367432,
698
+ "rewards/rejected": -6.568554878234863,
699
  "step": 450
700
  },
701
  {
702
+ "epoch": 0.6460674157303371,
703
+ "grad_norm": 0.9307264685630798,
704
  "learning_rate": 1.6809245510957666e-07,
705
+ "logits/chosen": -3.2232422828674316,
706
+ "logits/rejected": -2.4925780296325684,
707
+ "logps/chosen": -44.875,
708
+ "logps/rejected": -171.28750610351562,
709
+ "loss": 0.1721,
710
+ "rewards/accuracies": 0.7796875238418579,
711
+ "rewards/chosen": -0.14088821411132812,
712
+ "rewards/margins": 6.381640434265137,
713
+ "rewards/rejected": -6.520312309265137,
714
  "step": 460
715
  },
716
  {
717
+ "epoch": 0.6601123595505618,
718
+ "grad_norm": 1.6537760496139526,
719
  "learning_rate": 1.5660125748687093e-07,
720
+ "logits/chosen": -3.2333984375,
721
+ "logits/rejected": -2.518749952316284,
722
+ "logps/chosen": -43.334373474121094,
723
+ "logps/rejected": -167.8125,
724
+ "loss": 0.1794,
725
+ "rewards/accuracies": 0.770312488079071,
726
+ "rewards/chosen": -0.06363830715417862,
727
+ "rewards/margins": 6.300976753234863,
728
+ "rewards/rejected": -6.363671779632568,
729
  "step": 470
730
  },
731
  {
732
+ "epoch": 0.6741573033707865,
733
+ "grad_norm": 0.6378122568130493,
734
  "learning_rate": 1.4533506561564305e-07,
735
+ "logits/chosen": -3.2416014671325684,
736
+ "logits/rejected": -2.546093702316284,
737
+ "logps/chosen": -39.32500076293945,
738
+ "logps/rejected": -166.10000610351562,
739
+ "loss": 0.1712,
740
+ "rewards/accuracies": 0.7890625,
741
+ "rewards/chosen": 0.16349944472312927,
742
+ "rewards/margins": 6.383008003234863,
743
+ "rewards/rejected": -6.220312595367432,
744
  "step": 480
745
  },
746
  {
747
+ "epoch": 0.6882022471910112,
748
+ "grad_norm": 0.21734459698200226,
749
  "learning_rate": 1.343210207389125e-07,
750
+ "logits/chosen": -3.216015577316284,
751
+ "logits/rejected": NaN,
752
+ "logps/chosen": -39.485939025878906,
753
+ "logps/rejected": -162.99374389648438,
754
+ "loss": 0.1864,
755
+ "rewards/accuracies": 0.7640625238418579,
756
+ "rewards/chosen": 0.08720092475414276,
757
+ "rewards/margins": 6.254492282867432,
758
+ "rewards/rejected": -6.165625095367432,
759
  "step": 490
760
  },
761
  {
762
+ "epoch": 0.702247191011236,
763
+ "grad_norm": 0.3136639893054962,
764
  "learning_rate": 1.2358565665550387e-07,
765
+ "logits/chosen": -3.216015577316284,
766
  "logits/rejected": NaN,
767
+ "logps/chosen": -42.5078125,
768
+ "logps/rejected": -166.36874389648438,
769
+ "loss": 0.1841,
770
+ "rewards/accuracies": 0.7749999761581421,
771
+ "rewards/chosen": 0.017351532354950905,
772
+ "rewards/margins": 6.283398628234863,
773
+ "rewards/rejected": -6.266992092132568,
774
  "step": 500
775
  },
776
  {
777
+ "epoch": 0.7162921348314607,
778
+ "grad_norm": 0.07860163599252701,
779
  "learning_rate": 1.1315483579780094e-07,
780
+ "logits/chosen": -3.210742235183716,
781
+ "logits/rejected": -2.499218702316284,
782
+ "logps/chosen": -41.615623474121094,
783
+ "logps/rejected": -166.30624389648438,
784
+ "loss": 0.1831,
785
+ "rewards/accuracies": 0.7828124761581421,
786
+ "rewards/chosen": -0.01008453406393528,
787
+ "rewards/margins": 6.287304878234863,
788
+ "rewards/rejected": -6.297070503234863,
789
  "step": 510
790
  },
791
  {
792
+ "epoch": 0.7303370786516854,
793
+ "grad_norm": 0.173310786485672,
794
  "learning_rate": 1.0305368692688174e-07,
795
+ "logits/chosen": -3.195507764816284,
796
+ "logits/rejected": -2.504101514816284,
797
+ "logps/chosen": -45.17656326293945,
798
+ "logps/rejected": -168.6062469482422,
799
+ "loss": 0.1724,
800
+ "rewards/accuracies": 0.7828124761581421,
801
+ "rewards/chosen": -0.14428405463695526,
802
+ "rewards/margins": 6.204297065734863,
803
+ "rewards/rejected": -6.347070217132568,
804
  "step": 520
805
  },
806
  {
807
+ "epoch": 0.7443820224719101,
808
+ "grad_norm": 0.3090410828590393,
809
  "learning_rate": 9.330654459513266e-08,
810
+ "logits/chosen": -3.169726610183716,
811
+ "logits/rejected": -2.4736328125,
812
+ "logps/chosen": -46.984375,
813
+ "logps/rejected": -162.36874389648438,
814
+ "loss": 0.2013,
815
+ "rewards/accuracies": 0.7515624761581421,
816
+ "rewards/chosen": -0.3089355528354645,
817
+ "rewards/margins": 5.836718559265137,
818
+ "rewards/rejected": -6.145312309265137,
819
  "step": 530
820
  },
821
  {
822
+ "epoch": 0.7584269662921348,
823
+ "grad_norm": 0.249381884932518,
824
  "learning_rate": 8.393689052217964e-08,
825
+ "logits/chosen": -3.1357421875,
826
+ "logits/rejected": -2.484179735183716,
827
+ "logps/chosen": -49.49687576293945,
828
+ "logps/rejected": -167.35000610351562,
829
+ "loss": 0.1759,
830
  "rewards/accuracies": 0.770312488079071,
831
+ "rewards/chosen": -0.393869012594223,
832
+ "rewards/margins": 5.953320503234863,
833
+ "rewards/rejected": -6.347460746765137,
834
  "step": 540
835
  },
836
  {
837
+ "epoch": 0.7724719101123596,
838
+ "grad_norm": 0.2429146021604538,
839
  "learning_rate": 7.49672970253691e-08,
840
+ "logits/chosen": -3.1318359375,
841
+ "logits/rejected": -2.4886717796325684,
842
+ "logps/chosen": -52.857810974121094,
843
+ "logps/rejected": -171.19375610351562,
844
+ "loss": 0.1722,
845
+ "rewards/accuracies": 0.7953125238418579,
846
+ "rewards/chosen": -0.4801391661167145,
847
+ "rewards/margins": 5.990429878234863,
848
+ "rewards/rejected": -6.468359470367432,
849
  "step": 550
850
  },
851
  {
852
+ "epoch": 0.7865168539325843,
853
+ "grad_norm": 0.5875958204269409,
854
  "learning_rate": 6.641937264107867e-08,
855
+ "logits/chosen": -3.171093702316284,
856
+ "logits/rejected": -2.4808592796325684,
857
+ "logps/chosen": -47.681251525878906,
858
+ "logps/rejected": -174.71875,
859
+ "loss": 0.1605,
860
+ "rewards/accuracies": 0.7984374761581421,
861
+ "rewards/chosen": -0.26392096281051636,
862
+ "rewards/margins": 6.382421970367432,
863
+ "rewards/rejected": -6.64453125,
864
  "step": 560
865
  },
866
  {
867
+ "epoch": 0.800561797752809,
868
+ "grad_norm": 0.37553030252456665,
869
  "learning_rate": 5.831371006785962e-08,
870
+ "logits/chosen": -3.1826171875,
871
+ "logits/rejected": -2.4847655296325684,
872
+ "logps/chosen": -48.498435974121094,
873
+ "logps/rejected": -169.1125030517578,
874
+ "loss": 0.1773,
875
+ "rewards/accuracies": 0.7749999761581421,
876
+ "rewards/chosen": -0.3075141906738281,
877
+ "rewards/margins": 6.108984470367432,
878
+ "rewards/rejected": -6.417578220367432,
879
  "step": 570
880
  },
881
  {
882
+ "epoch": 0.8146067415730337,
883
+ "grad_norm": 0.31017985939979553,
884
  "learning_rate": 5.066983655682325e-08,
885
+ "logits/chosen": -3.141796827316284,
886
+ "logits/rejected": -2.505859375,
887
+ "logps/chosen": -50.5703125,
888
+ "logps/rejected": -166.83749389648438,
889
+ "loss": 0.1899,
890
+ "rewards/accuracies": 0.7671874761581421,
891
+ "rewards/chosen": -0.4396209716796875,
892
+ "rewards/margins": 5.888671875,
893
+ "rewards/rejected": -6.325585842132568,
894
  "step": 580
895
  },
896
  {
897
+ "epoch": 0.8286516853932584,
898
+ "grad_norm": 0.8791071772575378,
899
  "learning_rate": 4.3506166868781755e-08,
900
+ "logits/chosen": -3.1845703125,
901
+ "logits/rejected": -2.4937500953674316,
902
+ "logps/chosen": -48.103126525878906,
903
+ "logps/rejected": -170.8625030517578,
904
+ "loss": 0.1698,
905
+ "rewards/accuracies": 0.785937488079071,
906
+ "rewards/chosen": -0.27438658475875854,
907
+ "rewards/margins": 6.2060546875,
908
+ "rewards/rejected": -6.482421875,
909
  "step": 590
910
  },
911
  {
912
+ "epoch": 0.8426966292134831,
913
+ "grad_norm": 0.6104283928871155,
914
  "learning_rate": 3.683995891147695e-08,
915
+ "logits/chosen": -3.176953077316284,
916
  "logits/rejected": NaN,
917
+ "logps/chosen": -45.896873474121094,
918
+ "logps/rejected": -168.60000610351562,
919
+ "loss": 0.1721,
920
  "rewards/accuracies": 0.784375011920929,
921
+ "rewards/chosen": -0.21481475234031677,
922
+ "rewards/margins": 6.201952934265137,
923
+ "rewards/rejected": -6.416015625,
924
  "step": 600
925
  },
926
  {
927
+ "epoch": 0.8567415730337079,
928
+ "grad_norm": 0.31073251366615295,
929
  "learning_rate": 3.0687272163768986e-08,
930
+ "logits/chosen": -3.1650390625,
931
+ "logits/rejected": -2.513671875,
932
+ "logps/chosen": -46.66093826293945,
933
+ "logps/rejected": -170.8625030517578,
934
+ "loss": 0.1649,
935
+ "rewards/accuracies": 0.7875000238418579,
936
+ "rewards/chosen": -0.24439087510108948,
937
+ "rewards/margins": 6.258593559265137,
938
+ "rewards/rejected": -6.502148628234863,
939
  "step": 610
940
  },
941
  {
942
+ "epoch": 0.8707865168539326,
943
+ "grad_norm": 0.2421959638595581,
944
  "learning_rate": 2.5062928986944676e-08,
945
+ "logits/chosen": -3.170117139816284,
946
+ "logits/rejected": -2.5238280296325684,
947
+ "logps/chosen": -46.318748474121094,
948
+ "logps/rejected": -171.50625610351562,
949
+ "loss": 0.1595,
950
+ "rewards/accuracies": 0.7953125238418579,
951
+ "rewards/chosen": -0.15370789170265198,
952
+ "rewards/margins": 6.348242282867432,
953
+ "rewards/rejected": -6.501953125,
954
  "step": 620
955
  },
956
  {
957
+ "epoch": 0.8848314606741573,
958
+ "grad_norm": 0.292059987783432,
959
  "learning_rate": 1.9980478916351296e-08,
960
+ "logits/chosen": -3.185742139816284,
961
+ "logits/rejected": -2.491992235183716,
962
+ "logps/chosen": -46.803123474121094,
963
+ "logps/rejected": -171.0437469482422,
964
+ "loss": 0.1893,
965
+ "rewards/accuracies": 0.778124988079071,
966
+ "rewards/chosen": -0.1725509613752365,
967
+ "rewards/margins": 6.320703029632568,
968
+ "rewards/rejected": -6.491796970367432,
969
  "step": 630
970
  },
971
  {
972
+ "epoch": 0.898876404494382,
973
+ "grad_norm": 0.6732786297798157,
974
  "learning_rate": 1.5452166019378987e-08,
975
+ "logits/chosen": -3.2027344703674316,
976
+ "logits/rejected": -2.5123047828674316,
977
+ "logps/chosen": -46.25,
978
+ "logps/rejected": -172.4250030517578,
979
+ "loss": 0.1712,
980
+ "rewards/accuracies": 0.793749988079071,
981
+ "rewards/chosen": -0.16961669921875,
982
+ "rewards/margins": 6.3662109375,
983
+ "rewards/rejected": -6.537890434265137,
984
  "step": 640
985
  },
986
  {
987
+ "epoch": 0.9129213483146067,
988
+ "grad_norm": 0.25237613916397095,
989
  "learning_rate": 1.1488899398429896e-08,
990
+ "logits/chosen": -3.2007813453674316,
991
+ "logits/rejected": -2.5160155296325684,
992
+ "logps/chosen": -43.58906173706055,
993
+ "logps/rejected": -170.09375,
994
+ "loss": 0.1787,
995
+ "rewards/accuracies": 0.7749999761581421,
996
+ "rewards/chosen": -0.10042724758386612,
997
+ "rewards/margins": 6.383008003234863,
998
+ "rewards/rejected": -6.482421875,
999
  "step": 650
1000
  },
1001
  {
1002
+ "epoch": 0.9269662921348315,
1003
+ "grad_norm": 0.09695342183113098,
1004
  "learning_rate": 8.100226909935059e-09,
1005
+ "logits/chosen": -3.2056641578674316,
1006
+ "logits/rejected": -2.5228514671325684,
1007
+ "logps/chosen": -43.27656173706055,
1008
+ "logps/rejected": -166.16250610351562,
1009
+ "loss": 0.1876,
1010
+ "rewards/accuracies": 0.7718750238418579,
1011
+ "rewards/chosen": -0.07465209811925888,
1012
+ "rewards/margins": 6.197656154632568,
1013
+ "rewards/rejected": -6.272265434265137,
1014
  "step": 660
1015
  },
1016
  {
1017
+ "epoch": 0.9410112359550562,
1018
+ "grad_norm": 0.15045014023780823,
1019
  "learning_rate": 5.2943121627319346e-09,
1020
+ "logits/chosen": -3.207812547683716,
1021
+ "logits/rejected": -2.5152344703674316,
1022
+ "logps/chosen": -43.06562423706055,
1023
+ "logps/rejected": -166.84375,
1024
+ "loss": 0.1818,
1025
  "rewards/accuracies": 0.770312488079071,
1026
+ "rewards/chosen": -0.07712707668542862,
1027
+ "rewards/margins": 6.268164157867432,
1028
+ "rewards/rejected": -6.342382907867432,
1029
  "step": 670
1030
  },
1031
  {
1032
+ "epoch": 0.9550561797752809,
1033
+ "grad_norm": 0.31066492199897766,
1034
  "learning_rate": 3.077914851215585e-09,
1035
+ "logits/chosen": -3.2230467796325684,
1036
+ "logits/rejected": -2.5365233421325684,
1037
+ "logps/chosen": -44.46562576293945,
1038
+ "logps/rejected": -170.8625030517578,
1039
+ "loss": 0.1733,
1040
+ "rewards/accuracies": 0.7875000238418579,
1041
+ "rewards/chosen": -0.07046356052160263,
1042
+ "rewards/margins": 6.385156154632568,
1043
+ "rewards/rejected": -6.457812309265137,
1044
  "step": 680
1045
  },
1046
  {
1047
+ "epoch": 0.9691011235955056,
1048
+ "grad_norm": 0.11322323232889175,
1049
  "learning_rate": 1.4563744706429514e-09,
1050
+ "logits/chosen": -3.216015577316284,
1051
+ "logits/rejected": -2.50390625,
1052
+ "logps/chosen": -41.93281173706055,
1053
+ "logps/rejected": -168.16250610351562,
1054
+ "loss": 0.1693,
1055
+ "rewards/accuracies": 0.778124988079071,
1056
+ "rewards/chosen": -0.01323547400534153,
1057
+ "rewards/margins": 6.383593559265137,
1058
+ "rewards/rejected": -6.3974609375,
1059
  "step": 690
1060
  },
1061
  {
1062
+ "epoch": 0.9831460674157303,
1063
+ "grad_norm": 0.28659555315971375,
1064
  "learning_rate": 4.3359745382104405e-10,
1065
+ "logits/chosen": -3.21484375,
1066
+ "logits/rejected": NaN,
1067
+ "logps/chosen": -42.96406173706055,
1068
+ "logps/rejected": -169.0,
1069
+ "loss": 0.1708,
1070
+ "rewards/accuracies": 0.7749999761581421,
1071
+ "rewards/chosen": -0.045589447021484375,
1072
+ "rewards/margins": 6.397265434265137,
1073
+ "rewards/rejected": -6.444140434265137,
1074
  "step": 700
1075
  },
1076
  {
1077
+ "epoch": 0.9971910112359551,
1078
+ "grad_norm": 0.5906934142112732,
1079
  "learning_rate": 1.2047760167999133e-11,
1080
+ "logits/chosen": -3.2037110328674316,
1081
+ "logits/rejected": -2.529101610183716,
1082
+ "logps/chosen": -44.228126525878906,
1083
+ "logps/rejected": -170.83749389648438,
1084
+ "loss": 0.1657,
1085
+ "rewards/accuracies": 0.7984374761581421,
1086
+ "rewards/chosen": -0.09162139892578125,
1087
+ "rewards/margins": 6.3896484375,
1088
+ "rewards/rejected": -6.482421875,
1089
  "step": 710
1090
  },
1091
  {
1092
+ "epoch": 1.0,
1093
  "step": 712,
1094
  "total_flos": 0.0,
1095
+ "train_loss": 0.23233672156092827,
1096
+ "train_runtime": 2773.067,
1097
+ "train_samples_per_second": 16.43,
1098
+ "train_steps_per_second": 0.257
1099
  }
1100
  ],
1101
  "logging_steps": 10,
1102
  "max_steps": 712,
1103
  "num_input_tokens_seen": 0,
1104
+ "num_train_epochs": 1,
1105
  "save_steps": 500,
1106
  "stateful_callbacks": {
1107
  "TrainerControl": {
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3e47b8bd96f5ed212913b9d45c5a069e52c690acee03a5d1528a81d6eb90ec43
3
  size 7800
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e31315ddeb7aa7465f2bd16d20b45c43e929c9c6b6b9f79d4646ff479ab0b680
3
  size 7800