li-muyang committed
Commit 76e3ce7 · verified · 1 Parent(s): 6a2a0d2

Model save

Files changed (5)
  1. README.md +17 -25
  2. all_results.json +5 -5
  3. generation_config.json +1 -1
  4. train_results.json +5 -5
  5. trainer_state.json +593 -1441
README.md CHANGED
@@ -16,15 +16,15 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  This model was trained from scratch on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
- - Loss: 0.5060
20
- - Rewards/chosen: -0.9456
21
- - Rewards/rejected: -1.8257
22
- - Rewards/accuracies: 0.7579
23
- - Rewards/margins: 0.8801
24
- - Logps/rejected: -444.3302
25
- - Logps/chosen: -382.1980
26
- - Logits/rejected: 0.8653
27
- - Logits/chosen: 0.4899
28
 
29
  ## Model description
30
 
@@ -45,13 +45,13 @@ More information needed
45
  The following hyperparameters were used during training:
46
  - learning_rate: 5e-07
47
  - train_batch_size: 4
48
- - eval_batch_size: 4
49
  - seed: 42
50
  - distributed_type: multi-GPU
51
  - num_devices: 8
52
- - gradient_accumulation_steps: 2
53
- - total_train_batch_size: 64
54
- - total_eval_batch_size: 32
55
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
56
  - lr_scheduler_type: cosine
57
  - lr_scheduler_warmup_ratio: 0.1
@@ -61,20 +61,12 @@ The following hyperparameters were used during training:
61
 
62
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
63
  |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
64
- | 0.6703 | 0.1047 | 100 | 0.6695 | 0.0173 | -0.0408 | 0.6825 | 0.0581 | -265.8378 | -285.9061 | -0.4757 | -0.5813 |
65
- | 0.5922 | 0.2093 | 200 | 0.5902 | -0.4616 | -0.8504 | 0.7063 | 0.3888 | -346.7961 | -333.7917 | -0.6443 | -0.7411 |
66
- | 0.5592 | 0.3140 | 300 | 0.5462 | -0.6144 | -1.2154 | 0.7421 | 0.6010 | -383.3018 | -349.0777 | -0.2679 | -0.4330 |
67
- | 0.5461 | 0.4186 | 400 | 0.5323 | -0.7030 | -1.3568 | 0.7381 | 0.6539 | -397.4421 | -357.9295 | -0.0100 | -0.2412 |
68
- | 0.5211 | 0.5233 | 500 | 0.5215 | -1.0874 | -1.8737 | 0.7341 | 0.7863 | -449.1320 | -396.3762 | 0.5346 | 0.2433 |
69
- | 0.4932 | 0.6279 | 600 | 0.5180 | -0.7257 | -1.4962 | 0.7540 | 0.7705 | -411.3827 | -360.2088 | 0.4235 | 0.1246 |
70
- | 0.4891 | 0.7326 | 700 | 0.5097 | -0.9618 | -1.8012 | 0.7579 | 0.8394 | -441.8806 | -383.8190 | 0.7266 | 0.3793 |
71
- | 0.5052 | 0.8373 | 800 | 0.5067 | -0.9279 | -1.7930 | 0.7540 | 0.8651 | -441.0578 | -380.4258 | 0.8224 | 0.4548 |
72
- | 0.4946 | 0.9419 | 900 | 0.5060 | -0.9456 | -1.8257 | 0.7579 | 0.8801 | -444.3302 | -382.1980 | 0.8653 | 0.4899 |
73
 
74
 
75
  ### Framework versions
76
 
77
- - Transformers 4.45.0
78
- - Pytorch 2.2.2+rocm5.7
79
- - Datasets 3.2.0
80
  - Tokenizers 0.20.3
 
16
 
17
  This model was trained from scratch on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
+ - Loss: 0.6284
20
+ - Rewards/chosen: -0.6611
21
+ - Rewards/rejected: -0.8975
22
+ - Rewards/accuracies: 0.6406
23
+ - Rewards/margins: 0.2363
24
+ - Logps/rejected: -625.4311
25
+ - Logps/chosen: -651.8835
26
+ - Logits/rejected: -0.6965
27
+ - Logits/chosen: -0.7554
28
 
29
  ## Model description
30
 
 
45
  The following hyperparameters were used during training:
46
  - learning_rate: 5e-07
47
  - train_batch_size: 4
48
+ - eval_batch_size: 8
49
  - seed: 42
50
  - distributed_type: multi-GPU
51
  - num_devices: 8
52
+ - gradient_accumulation_steps: 4
53
+ - total_train_batch_size: 128
54
+ - total_eval_batch_size: 64
55
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
56
  - lr_scheduler_type: cosine
57
  - lr_scheduler_warmup_ratio: 0.1
 
61
 
62
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
63
  |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
64
+ | 0.6147 | 0.9984 | 477 | 0.6284 | -0.6611 | -0.8975 | 0.6406 | 0.2363 | -625.4311 | -651.8835 | -0.6965 | -0.7554 |
 
 
 
 
 
 
 
 
65
 
66
 
67
  ### Framework versions
68
 
69
+ - Transformers 4.45.2
70
+ - Pytorch 2.5.1+rocm6.2
71
+ - Datasets 3.5.0
72
  - Tokenizers 0.20.3
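The updated card reports preference-optimization metrics (rewards/chosen, rewards/rejected, margins), and its hyperparameters imply an effective train batch of 4 per device × 8 GPUs × 4 accumulation steps = 128 (and 8 × 8 = 64 for eval). As a hedged illustration only: the commit does not say which trainer produced these files, but the metric names match TRL's DPOTrainer, so the sketch below assumes TRL, and the model and dataset identifiers are placeholders rather than values from this repo.

```python
# Hypothetical sketch assuming TRL's DPOTrainer; nothing in this commit confirms the exact setup.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_name = "path/to/base-model"                   # placeholder, not stated in the commit
dataset = load_dataset("path/to/preference-data")   # placeholder preference dataset

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Mirrors the hyperparameters listed in the updated README:
# 4 per-device batch * 8 GPUs * 4 accumulation steps = 128 effective train batch.
args = DPOConfig(
    output_dir="dpo-run",
    learning_rate=5e-7,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=1,
)

trainer = DPOTrainer(
    model=model,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,  # newer TRL releases take processing_class= instead
)
trainer.train()
```

With one epoch over 61,134 preference pairs and a 128-sample effective batch, this works out to about 477 optimizer steps (⌊61134 / 128⌋), matching the step count in the new results table.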
all_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 0.9994767137624281,
3
  "total_flos": 0.0,
4
- "train_loss": 0.5461677596207064,
5
- "train_runtime": 19976.9989,
6
  "train_samples": 61134,
7
- "train_samples_per_second": 3.06,
8
- "train_steps_per_second": 0.048
9
  }
 
1
  {
2
+ "epoch": 0.9984301412872841,
3
  "total_flos": 0.0,
4
+ "train_loss": 0.6453013030238122,
5
+ "train_runtime": 27951.2038,
6
  "train_samples": 61134,
7
+ "train_samples_per_second": 2.187,
8
+ "train_steps_per_second": 0.017
9
  }
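The throughput figures in the updated all_results.json are internally consistent under the usual Trainer convention (samples per second ≈ train_samples / train_runtime for a one-epoch run). A quick check, assuming the file sits in the working directory:

```python
# Sanity-check the updated all_results.json throughput figures.
# Assumes the standard Trainer convention: samples_per_second ≈ train_samples / train_runtime (one epoch).
import json

with open("all_results.json") as f:
    results = json.load(f)

throughput = results["train_samples"] / results["train_runtime"]
print(round(throughput, 3))  # ≈ 2.187, matching the reported train_samples_per_second
```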
generation_config.json CHANGED
@@ -5,5 +5,5 @@
5
  "eos_token_id": 128001,
6
  "temperature": 0.6,
7
  "top_p": 0.9,
8
- "transformers_version": "4.45.0"
9
  }
 
5
  "eos_token_id": 128001,
6
  "temperature": 0.6,
7
  "top_p": 0.9,
8
+ "transformers_version": "4.45.2"
9
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 0.9994767137624281,
3
  "total_flos": 0.0,
4
- "train_loss": 0.5461677596207064,
5
- "train_runtime": 19976.9989,
6
  "train_samples": 61134,
7
- "train_samples_per_second": 3.06,
8
- "train_steps_per_second": 0.048
9
  }
 
1
  {
2
+ "epoch": 0.9984301412872841,
3
  "total_flos": 0.0,
4
+ "train_loss": 0.6453013030238122,
5
+ "train_runtime": 27951.2038,
6
  "train_samples": 61134,
7
+ "train_samples_per_second": 2.187,
8
+ "train_steps_per_second": 0.017
9
  }
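train_results.json carries the same summary numbers as all_results.json. The trainer_state.json diff below prunes most of the per-step log but keeps the structure of log_history; a minimal sketch for pulling the evaluation curve back out of it (field names taken from the entries shown below):

```python
# Extract the evaluation curve from the pruned trainer_state.json log history.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # evaluation entries carry eval_* keys
        print(f'epoch={entry["epoch"]:.4f}  '
              f'eval_loss={entry["eval_loss"]:.4f}  '
              f'margin={entry["eval_rewards/margins"]:.4f}')
```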
trainer_state.json CHANGED
@@ -1,21 +1,21 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.9994767137624281,
5
- "eval_steps": 100,
6
- "global_step": 955,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0010465724751439038,
13
- "grad_norm": 4.695332597526886,
14
- "learning_rate": 5.208333333333333e-09,
15
- "logits/chosen": -0.7187488675117493,
16
- "logits/rejected": -0.5983389019966125,
17
- "logps/chosen": -433.40936279296875,
18
- "logps/rejected": -346.82391357421875,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -23,1590 +23,742 @@
23
  "rewards/rejected": 0.0,
24
  "step": 1
25
  },
26
- {
27
- "epoch": 0.010465724751439037,
28
- "grad_norm": 4.094353723689003,
29
- "learning_rate": 5.208333333333333e-08,
30
- "logits/chosen": -0.4951174557209015,
31
- "logits/rejected": -0.4637991189956665,
32
- "logps/chosen": -283.0612487792969,
33
- "logps/rejected": -261.8544921875,
34
- "loss": 0.6932,
35
- "rewards/accuracies": 0.3888888955116272,
36
- "rewards/chosen": -0.00028089413535781205,
37
- "rewards/margins": -0.000883792235981673,
38
- "rewards/rejected": 0.0006028979551047087,
39
- "step": 10
40
- },
41
  {
42
  "epoch": 0.020931449502878074,
43
- "grad_norm": 4.493448151438676,
44
  "learning_rate": 1.0416666666666667e-07,
45
- "logits/chosen": -0.5218612551689148,
46
- "logits/rejected": -0.4537959694862366,
47
- "logps/chosen": -301.08050537109375,
48
- "logps/rejected": -288.3440856933594,
49
- "loss": 0.6933,
50
- "rewards/accuracies": 0.5249999761581421,
51
- "rewards/chosen": 0.0001712679659249261,
52
- "rewards/margins": 0.00038912202580831945,
53
- "rewards/rejected": -0.00021785404533147812,
54
- "step": 20
55
- },
56
- {
57
- "epoch": 0.03139717425431711,
58
- "grad_norm": 4.560411028573212,
59
- "learning_rate": 1.5624999999999999e-07,
60
- "logits/chosen": -0.5537956953048706,
61
- "logits/rejected": -0.47262096405029297,
62
- "logps/chosen": -287.0794372558594,
63
- "logps/rejected": -264.6256103515625,
64
  "loss": 0.6931,
65
- "rewards/accuracies": 0.512499988079071,
66
- "rewards/chosen": 0.0005557264084927738,
67
- "rewards/margins": -0.00028174123144708574,
68
- "rewards/rejected": 0.0008374677272513509,
69
- "step": 30
70
  },
71
  {
72
  "epoch": 0.04186289900575615,
73
- "grad_norm": 4.747451905252561,
74
  "learning_rate": 2.0833333333333333e-07,
75
- "logits/chosen": -0.5157141089439392,
76
- "logits/rejected": -0.42310914397239685,
77
- "logps/chosen": -324.68865966796875,
78
- "logps/rejected": -281.6961364746094,
79
- "loss": 0.6925,
80
- "rewards/accuracies": 0.6000000238418579,
81
- "rewards/chosen": 0.004000083543360233,
82
- "rewards/margins": 0.0023048361763358116,
83
- "rewards/rejected": 0.0016952479491010308,
84
- "step": 40
85
- },
86
- {
87
- "epoch": 0.052328623757195186,
88
- "grad_norm": 4.1236572047382545,
89
- "learning_rate": 2.604166666666667e-07,
90
- "logits/chosen": -0.4842318594455719,
91
- "logits/rejected": -0.4082712233066559,
92
- "logps/chosen": -241.1922607421875,
93
- "logps/rejected": -204.27723693847656,
94
- "loss": 0.6916,
95
- "rewards/accuracies": 0.6000000238418579,
96
- "rewards/chosen": 0.009572440758347511,
97
- "rewards/margins": 0.004367954097688198,
98
- "rewards/rejected": 0.0052044871263206005,
99
- "step": 50
100
  },
101
  {
102
  "epoch": 0.06279434850863422,
103
- "grad_norm": 4.38318666746926,
104
  "learning_rate": 3.1249999999999997e-07,
105
- "logits/chosen": -0.5186334252357483,
106
- "logits/rejected": -0.47192978858947754,
107
- "logps/chosen": -322.8270568847656,
108
- "logps/rejected": -279.4931640625,
109
- "loss": 0.6901,
110
- "rewards/accuracies": 0.6875,
111
- "rewards/chosen": 0.019420895725488663,
112
- "rewards/margins": 0.006516781635582447,
113
- "rewards/rejected": 0.012904113158583641,
114
- "step": 60
115
- },
116
- {
117
- "epoch": 0.07326007326007326,
118
- "grad_norm": 4.392009777368473,
119
- "learning_rate": 3.645833333333333e-07,
120
- "logits/chosen": -0.44262728095054626,
121
- "logits/rejected": -0.4167408049106598,
122
- "logps/chosen": -241.4150848388672,
123
- "logps/rejected": -265.60296630859375,
124
- "loss": 0.6873,
125
- "rewards/accuracies": 0.737500011920929,
126
- "rewards/chosen": 0.02704036608338356,
127
- "rewards/margins": 0.014237403869628906,
128
- "rewards/rejected": 0.01280296128243208,
129
- "step": 70
130
  },
131
  {
132
  "epoch": 0.0837257980115123,
133
- "grad_norm": 4.432632376020607,
134
  "learning_rate": 4.1666666666666667e-07,
135
- "logits/chosen": -0.5181559920310974,
136
- "logits/rejected": -0.4063476622104645,
137
- "logps/chosen": -259.34405517578125,
138
- "logps/rejected": -240.4481658935547,
139
- "loss": 0.683,
140
- "rewards/accuracies": 0.7250000238418579,
141
- "rewards/chosen": 0.032934751361608505,
142
- "rewards/margins": 0.019226713106036186,
143
- "rewards/rejected": 0.013708041980862617,
144
- "step": 80
145
- },
146
- {
147
- "epoch": 0.09419152276295134,
148
- "grad_norm": 3.7960885338069263,
149
- "learning_rate": 4.6874999999999996e-07,
150
- "logits/chosen": -0.4854390025138855,
151
- "logits/rejected": -0.44603580236434937,
152
- "logps/chosen": -280.05841064453125,
153
- "logps/rejected": -260.02008056640625,
154
- "loss": 0.6793,
155
- "rewards/accuracies": 0.6625000238418579,
156
- "rewards/chosen": 0.03177185729146004,
157
- "rewards/margins": 0.019615832716226578,
158
- "rewards/rejected": 0.01215602271258831,
159
- "step": 90
160
- },
161
- {
162
- "epoch": 0.10465724751439037,
163
- "grad_norm": 5.321767060392544,
164
- "learning_rate": 4.999732492681437e-07,
165
- "logits/chosen": -0.5290640592575073,
166
- "logits/rejected": -0.4432452321052551,
167
- "logps/chosen": -280.30877685546875,
168
- "logps/rejected": -274.52349853515625,
169
- "loss": 0.6703,
170
- "rewards/accuracies": 0.7124999761581421,
171
- "rewards/chosen": 0.03181055933237076,
172
- "rewards/margins": 0.04814142733812332,
173
- "rewards/rejected": -0.016330868005752563,
174
- "step": 100
175
  },
176
  {
177
  "epoch": 0.10465724751439037,
178
- "eval_logits/chosen": -0.5812728404998779,
179
- "eval_logits/rejected": -0.4756637215614319,
180
- "eval_logps/chosen": -285.9060974121094,
181
- "eval_logps/rejected": -265.83782958984375,
182
- "eval_loss": 0.6694991588592529,
183
- "eval_rewards/accuracies": 0.682539701461792,
184
- "eval_rewards/chosen": 0.017283162102103233,
185
- "eval_rewards/margins": 0.05808327719569206,
186
- "eval_rewards/rejected": -0.04080010578036308,
187
- "eval_runtime": 227.6437,
188
- "eval_samples_per_second": 8.786,
189
- "eval_steps_per_second": 0.277,
190
- "step": 100
191
- },
192
- {
193
- "epoch": 0.1151229722658294,
194
- "grad_norm": 4.6587263741397384,
195
- "learning_rate": 4.996723692767926e-07,
196
- "logits/chosen": -0.5552923083305359,
197
- "logits/rejected": -0.463242769241333,
198
- "logps/chosen": -263.10443115234375,
199
- "logps/rejected": -231.2692413330078,
200
- "loss": 0.6685,
201
- "rewards/accuracies": 0.737500011920929,
202
- "rewards/chosen": 0.011513032019138336,
203
- "rewards/margins": 0.08094070851802826,
204
- "rewards/rejected": -0.06942768394947052,
205
- "step": 110
206
  },
207
  {
208
  "epoch": 0.12558869701726844,
209
- "grad_norm": 5.15038375709292,
210
- "learning_rate": 4.990375746213598e-07,
211
- "logits/chosen": -0.5475479960441589,
212
- "logits/rejected": -0.5249751210212708,
213
- "logps/chosen": -238.50537109375,
214
- "logps/rejected": -264.0198974609375,
215
- "loss": 0.6508,
216
- "rewards/accuracies": 0.737500011920929,
217
- "rewards/chosen": -0.0012080561136826873,
218
- "rewards/margins": 0.08254374563694,
219
- "rewards/rejected": -0.08375179767608643,
220
- "step": 120
221
- },
222
- {
223
- "epoch": 0.1360544217687075,
224
- "grad_norm": 5.6036300426343715,
225
- "learning_rate": 4.980697142834314e-07,
226
- "logits/chosen": -0.6076304316520691,
227
- "logits/rejected": -0.5554308295249939,
228
- "logps/chosen": -297.47943115234375,
229
- "logps/rejected": -291.3108825683594,
230
- "loss": 0.6389,
231
- "rewards/accuracies": 0.699999988079071,
232
- "rewards/chosen": -0.10223034769296646,
233
- "rewards/margins": 0.1436336785554886,
234
- "rewards/rejected": -0.24586400389671326,
235
- "step": 130
236
  },
237
  {
238
  "epoch": 0.14652014652014653,
239
- "grad_norm": 6.407440966129605,
240
- "learning_rate": 4.967700826904229e-07,
241
- "logits/chosen": -0.576185405254364,
242
- "logits/rejected": -0.5214563608169556,
243
- "logps/chosen": -312.6526794433594,
244
- "logps/rejected": -317.4207763671875,
245
- "loss": 0.6255,
246
- "rewards/accuracies": 0.7124999761581421,
247
- "rewards/chosen": -0.17939871549606323,
248
- "rewards/margins": 0.13575369119644165,
249
- "rewards/rejected": -0.3151523768901825,
250
- "step": 140
251
- },
252
- {
253
- "epoch": 0.15698587127158556,
254
- "grad_norm": 6.101819441628303,
255
- "learning_rate": 4.951404179843962e-07,
256
- "logits/chosen": -0.7110940217971802,
257
- "logits/rejected": -0.5994465947151184,
258
- "logps/chosen": -330.38751220703125,
259
- "logps/rejected": -281.0096435546875,
260
- "loss": 0.6202,
261
- "rewards/accuracies": 0.737500011920929,
262
- "rewards/chosen": -0.14434388279914856,
263
- "rewards/margins": 0.2144981324672699,
264
- "rewards/rejected": -0.35884204506874084,
265
- "step": 150
266
  },
267
  {
268
  "epoch": 0.1674515960230246,
269
- "grad_norm": 20.68712383660321,
270
- "learning_rate": 4.931828996974498e-07,
271
- "logits/chosen": -0.7509492635726929,
272
- "logits/rejected": -0.571489155292511,
273
- "logps/chosen": -354.3675231933594,
274
- "logps/rejected": -311.22052001953125,
275
- "loss": 0.5982,
276
- "rewards/accuracies": 0.7875000238418579,
277
- "rewards/chosen": -0.2555951774120331,
278
- "rewards/margins": 0.3567826747894287,
279
- "rewards/rejected": -0.6123778223991394,
280
- "step": 160
281
- },
282
- {
283
- "epoch": 0.17791732077446362,
284
- "grad_norm": 8.080191404091543,
285
- "learning_rate": 4.909001458367866e-07,
286
- "logits/chosen": -0.6748760938644409,
287
- "logits/rejected": -0.5670477747917175,
288
- "logps/chosen": -308.5774841308594,
289
- "logps/rejected": -334.3050842285156,
290
- "loss": 0.5899,
291
- "rewards/accuracies": 0.6875,
292
- "rewards/chosen": -0.4368668496608734,
293
- "rewards/margins": 0.32547345757484436,
294
- "rewards/rejected": -0.7623404264450073,
295
- "step": 170
296
  },
297
  {
298
  "epoch": 0.18838304552590268,
299
- "grad_norm": 11.93364822141488,
300
- "learning_rate": 4.882952093833627e-07,
301
- "logits/chosen": -0.7000716924667358,
302
- "logits/rejected": -0.6841859221458435,
303
- "logps/chosen": -266.9092712402344,
304
- "logps/rejected": -318.55902099609375,
305
- "loss": 0.5811,
306
- "rewards/accuracies": 0.7124999761581421,
307
- "rewards/chosen": -0.33145153522491455,
308
- "rewards/margins": 0.372114360332489,
309
- "rewards/rejected": -0.7035658955574036,
310
- "step": 180
311
- },
312
- {
313
- "epoch": 0.1988487702773417,
314
- "grad_norm": 8.64087542458168,
315
- "learning_rate": 4.853715742087946e-07,
316
- "logits/chosen": -0.6523844003677368,
317
- "logits/rejected": -0.6216700673103333,
318
- "logps/chosen": -304.2262268066406,
319
- "logps/rejected": -387.6643981933594,
320
- "loss": 0.5856,
321
- "rewards/accuracies": 0.762499988079071,
322
- "rewards/chosen": -0.47744685411453247,
323
- "rewards/margins": 0.3826759457588196,
324
- "rewards/rejected": -0.860122799873352,
325
- "step": 190
326
- },
327
- {
328
- "epoch": 0.20931449502878074,
329
- "grad_norm": 12.162378152563793,
330
- "learning_rate": 4.821331504159906e-07,
331
- "logits/chosen": -0.8054048418998718,
332
- "logits/rejected": -0.660961925983429,
333
- "logps/chosen": -370.180419921875,
334
- "logps/rejected": -326.85565185546875,
335
- "loss": 0.5922,
336
- "rewards/accuracies": 0.7124999761581421,
337
- "rewards/chosen": -0.3945849537849426,
338
- "rewards/margins": 0.4013177454471588,
339
- "rewards/rejected": -0.7959026098251343,
340
- "step": 200
341
  },
342
  {
343
  "epoch": 0.20931449502878074,
344
- "eval_logits/chosen": -0.74112468957901,
345
- "eval_logits/rejected": -0.6443408131599426,
346
- "eval_logps/chosen": -333.7917175292969,
347
- "eval_logps/rejected": -346.79608154296875,
348
- "eval_loss": 0.5902364253997803,
349
- "eval_rewards/accuracies": 0.7063491940498352,
350
- "eval_rewards/chosen": -0.46157318353652954,
351
- "eval_rewards/margins": 0.388809472322464,
352
- "eval_rewards/rejected": -0.8503828048706055,
353
- "eval_runtime": 226.0253,
354
- "eval_samples_per_second": 8.849,
355
- "eval_steps_per_second": 0.279,
356
- "step": 200
357
- },
358
- {
359
- "epoch": 0.21978021978021978,
360
- "grad_norm": 9.47218509665613,
361
- "learning_rate": 4.785842691097342e-07,
362
- "logits/chosen": -0.6910367012023926,
363
- "logits/rejected": -0.6332982182502747,
364
- "logps/chosen": -335.70611572265625,
365
- "logps/rejected": -382.6346740722656,
366
- "loss": 0.5955,
367
- "rewards/accuracies": 0.6625000238418579,
368
- "rewards/chosen": -0.5499948859214783,
369
- "rewards/margins": 0.3104594051837921,
370
- "rewards/rejected": -0.860454261302948,
371
- "step": 210
372
  },
373
  {
374
  "epoch": 0.2302459445316588,
375
- "grad_norm": 9.803517786698613,
376
- "learning_rate": 4.7472967660421603e-07,
377
- "logits/chosen": -0.7180417776107788,
378
- "logits/rejected": -0.621491014957428,
379
- "logps/chosen": -331.8623046875,
380
- "logps/rejected": -331.8077087402344,
381
- "loss": 0.5901,
382
- "rewards/accuracies": 0.7250000238418579,
383
- "rewards/chosen": -0.46668997406959534,
384
- "rewards/margins": 0.48732930421829224,
385
- "rewards/rejected": -0.9540191888809204,
386
- "step": 220
387
- },
388
- {
389
- "epoch": 0.24071166928309787,
390
- "grad_norm": 10.560915230537866,
391
- "learning_rate": 4.705745280752585e-07,
392
- "logits/chosen": -0.5524531006813049,
393
- "logits/rejected": -0.5356574058532715,
394
- "logps/chosen": -321.02392578125,
395
- "logps/rejected": -364.1357116699219,
396
- "loss": 0.578,
397
- "rewards/accuracies": 0.75,
398
- "rewards/chosen": -0.621025800704956,
399
- "rewards/margins": 0.4221344590187073,
400
- "rewards/rejected": -1.0431602001190186,
401
- "step": 230
402
  },
403
  {
404
  "epoch": 0.25117739403453687,
405
- "grad_norm": 9.336666465482518,
406
- "learning_rate": 4.6612438066572555e-07,
407
- "logits/chosen": -0.5498469471931458,
408
- "logits/rejected": -0.5183900594711304,
409
- "logps/chosen": -326.112060546875,
410
- "logps/rejected": -366.96673583984375,
411
- "loss": 0.5612,
412
- "rewards/accuracies": 0.762499988079071,
413
- "rewards/chosen": -0.6049496531486511,
414
- "rewards/margins": 0.49857011437416077,
415
- "rewards/rejected": -1.1035196781158447,
416
- "step": 240
417
- },
418
- {
419
- "epoch": 0.2616431187859759,
420
- "grad_norm": 14.340514917233914,
421
- "learning_rate": 4.6138518605333664e-07,
422
- "logits/chosen": -0.5425869226455688,
423
- "logits/rejected": -0.5251628160476685,
424
- "logps/chosen": -321.1498107910156,
425
- "logps/rejected": -371.6676330566406,
426
- "loss": 0.5388,
427
- "rewards/accuracies": 0.8374999761581421,
428
- "rewards/chosen": -0.5140849351882935,
429
- "rewards/margins": 0.5452972650527954,
430
- "rewards/rejected": -1.0593822002410889,
431
- "step": 250
432
  },
433
  {
434
  "epoch": 0.272108843537415,
435
- "grad_norm": 10.300342307961989,
436
- "learning_rate": 4.5636328249082514e-07,
437
- "logits/chosen": -0.6172841787338257,
438
- "logits/rejected": -0.45737963914871216,
439
- "logps/chosen": -340.3819274902344,
440
- "logps/rejected": -368.44586181640625,
441
- "loss": 0.5476,
442
- "rewards/accuracies": 0.6625000238418579,
443
- "rewards/chosen": -0.6206396222114563,
444
- "rewards/margins": 0.3971046805381775,
445
- "rewards/rejected": -1.0177444219589233,
446
- "step": 260
447
- },
448
- {
449
- "epoch": 0.282574568288854,
450
- "grad_norm": 11.855640878189652,
451
- "learning_rate": 4.510653863290871e-07,
452
- "logits/chosen": -0.5061743259429932,
453
- "logits/rejected": -0.3946554660797119,
454
- "logps/chosen": -368.46331787109375,
455
- "logps/rejected": -373.1842346191406,
456
- "loss": 0.5366,
457
- "rewards/accuracies": 0.7250000238418579,
458
- "rewards/chosen": -0.6316210627555847,
459
- "rewards/margins": 0.5877398252487183,
460
- "rewards/rejected": -1.2193609476089478,
461
- "step": 270
462
  },
463
  {
464
  "epoch": 0.29304029304029305,
465
- "grad_norm": 13.86402294982031,
466
- "learning_rate": 4.4549858303465737e-07,
467
- "logits/chosen": -0.4837842881679535,
468
- "logits/rejected": -0.3030511736869812,
469
- "logps/chosen": -394.0540771484375,
470
- "logps/rejected": -411.68902587890625,
471
- "loss": 0.567,
472
- "rewards/accuracies": 0.7875000238418579,
473
- "rewards/chosen": -0.7208216786384583,
474
- "rewards/margins": 0.6727169752120972,
475
- "rewards/rejected": -1.3935387134552002,
476
- "step": 280
477
- },
478
- {
479
- "epoch": 0.3035060177917321,
480
- "grad_norm": 14.705610202375755,
481
- "learning_rate": 4.396703177135261e-07,
482
- "logits/chosen": -0.5852281451225281,
483
- "logits/rejected": -0.36015063524246216,
484
- "logps/chosen": -368.44146728515625,
485
- "logps/rejected": -360.4360046386719,
486
- "loss": 0.5489,
487
- "rewards/accuracies": 0.75,
488
- "rewards/chosen": -0.6637938022613525,
489
- "rewards/margins": 0.5562569499015808,
490
- "rewards/rejected": -1.2200508117675781,
491
- "step": 290
492
- },
493
- {
494
- "epoch": 0.3139717425431711,
495
- "grad_norm": 14.361033433220218,
496
- "learning_rate": 4.335883851539693e-07,
497
- "logits/chosen": -0.36654001474380493,
498
- "logits/rejected": -0.317962646484375,
499
- "logps/chosen": -305.54656982421875,
500
- "logps/rejected": -349.1251525878906,
501
- "loss": 0.5592,
502
- "rewards/accuracies": 0.6625000238418579,
503
- "rewards/chosen": -0.5999817848205566,
504
- "rewards/margins": 0.45001593232154846,
505
- "rewards/rejected": -1.0499978065490723,
506
- "step": 300
507
  },
508
  {
509
  "epoch": 0.3139717425431711,
510
- "eval_logits/chosen": -0.43302780389785767,
511
- "eval_logits/rejected": -0.2678911089897156,
512
- "eval_logps/chosen": -349.07769775390625,
513
- "eval_logps/rejected": -383.3018493652344,
514
- "eval_loss": 0.5462030172348022,
515
- "eval_rewards/accuracies": 0.7420634627342224,
516
- "eval_rewards/chosen": -0.6144329309463501,
517
- "eval_rewards/margins": 0.6010070443153381,
518
- "eval_rewards/rejected": -1.2154401540756226,
519
- "eval_runtime": 227.0069,
520
- "eval_samples_per_second": 8.81,
521
- "eval_steps_per_second": 0.278,
522
- "step": 300
523
- },
524
- {
525
- "epoch": 0.32443746729461015,
526
- "grad_norm": 12.59126443650798,
527
- "learning_rate": 4.272609194017105e-07,
528
- "logits/chosen": -0.5242592096328735,
529
- "logits/rejected": -0.22999849915504456,
530
- "logps/chosen": -345.8105163574219,
531
- "logps/rejected": -334.08111572265625,
532
- "loss": 0.5479,
533
- "rewards/accuracies": 0.7250000238418579,
534
- "rewards/chosen": -0.6000981330871582,
535
- "rewards/margins": 0.637532114982605,
536
- "rewards/rejected": -1.2376301288604736,
537
- "step": 310
538
  },
539
  {
540
  "epoch": 0.3349031920460492,
541
- "grad_norm": 11.732525849526553,
542
- "learning_rate": 4.2069638288135547e-07,
543
- "logits/chosen": -0.34528014063835144,
544
- "logits/rejected": -0.2457645833492279,
545
- "logps/chosen": -330.71527099609375,
546
- "logps/rejected": -412.20953369140625,
547
- "loss": 0.55,
548
- "rewards/accuracies": 0.675000011920929,
549
- "rewards/chosen": -0.695503294467926,
550
- "rewards/margins": 0.5903774499893188,
551
- "rewards/rejected": -1.2858808040618896,
552
- "step": 320
553
- },
554
- {
555
- "epoch": 0.3453689167974882,
556
- "grad_norm": 13.250553221571959,
557
- "learning_rate": 4.139035550786494e-07,
558
- "logits/chosen": -0.21062567830085754,
559
- "logits/rejected": -0.08340445160865784,
560
- "logps/chosen": -331.62542724609375,
561
- "logps/rejected": -363.3686828613281,
562
- "loss": 0.5412,
563
- "rewards/accuracies": 0.75,
564
- "rewards/chosen": -0.7190684676170349,
565
- "rewards/margins": 0.5235351920127869,
566
- "rewards/rejected": -1.2426036596298218,
567
- "step": 330
568
  },
569
  {
570
  "epoch": 0.35583464154892724,
571
- "grad_norm": 12.344702305971902,
572
- "learning_rate": 4.0689152079869306e-07,
573
- "logits/chosen": -0.05835915356874466,
574
- "logits/rejected": 0.08525937795639038,
575
- "logps/chosen": -335.3782958984375,
576
- "logps/rejected": -347.57257080078125,
577
- "loss": 0.5462,
578
- "rewards/accuracies": 0.637499988079071,
579
- "rewards/chosen": -0.8475249409675598,
580
- "rewards/margins": 0.44163185358047485,
581
- "rewards/rejected": -1.2891566753387451,
582
- "step": 340
583
- },
584
- {
585
- "epoch": 0.3663003663003663,
586
- "grad_norm": 13.72798924306665,
587
- "learning_rate": 3.99669658015821e-07,
588
- "logits/chosen": -0.2664807438850403,
589
- "logits/rejected": -0.0909152552485466,
590
- "logps/chosen": -316.62506103515625,
591
- "logps/rejected": -341.7252197265625,
592
- "loss": 0.535,
593
- "rewards/accuracies": 0.675000011920929,
594
- "rewards/chosen": -0.6483460664749146,
595
- "rewards/margins": 0.47694116830825806,
596
- "rewards/rejected": -1.1252872943878174,
597
- "step": 350
598
  },
599
  {
600
  "epoch": 0.37676609105180536,
601
- "grad_norm": 16.67040726492694,
602
- "learning_rate": 3.92247625331392e-07,
603
- "logits/chosen": -0.3472316861152649,
604
- "logits/rejected": -0.20045724511146545,
605
- "logps/chosen": -332.7682800292969,
606
- "logps/rejected": -366.4549865722656,
607
- "loss": 0.5055,
608
- "rewards/accuracies": 0.800000011920929,
609
- "rewards/chosen": -0.5464685559272766,
610
- "rewards/margins": 0.5930379629135132,
611
- "rewards/rejected": -1.1395065784454346,
612
- "step": 360
613
- },
614
- {
615
- "epoch": 0.3872318158032444,
616
- "grad_norm": 14.93675802203948,
617
- "learning_rate": 3.846353490562664e-07,
618
- "logits/chosen": -0.325096994638443,
619
- "logits/rejected": -0.2412107288837433,
620
- "logps/chosen": -333.1510925292969,
621
- "logps/rejected": -368.9770202636719,
622
- "loss": 0.5258,
623
- "rewards/accuracies": 0.7124999761581421,
624
- "rewards/chosen": -0.7361698746681213,
625
- "rewards/margins": 0.49931269884109497,
626
- "rewards/rejected": -1.2354824542999268,
627
- "step": 370
628
  },
629
  {
630
  "epoch": 0.3976975405546834,
631
- "grad_norm": 13.251455539085676,
632
- "learning_rate": 3.768430099352445e-07,
633
- "logits/chosen": -0.21928231418132782,
634
- "logits/rejected": -0.08141541481018066,
635
- "logps/chosen": -347.12762451171875,
636
- "logps/rejected": -409.29571533203125,
637
- "loss": 0.5477,
638
- "rewards/accuracies": 0.7124999761581421,
639
- "rewards/chosen": -0.8551673889160156,
640
- "rewards/margins": 0.5591238737106323,
641
- "rewards/rejected": -1.4142911434173584,
642
- "step": 380
643
- },
644
- {
645
- "epoch": 0.40816326530612246,
646
- "grad_norm": 12.09475227958707,
647
- "learning_rate": 3.6888102953122304e-07,
648
- "logits/chosen": -0.19851084053516388,
649
- "logits/rejected": -0.08052431792020798,
650
- "logps/chosen": -308.36724853515625,
651
- "logps/rejected": -357.89349365234375,
652
- "loss": 0.542,
653
- "rewards/accuracies": 0.6875,
654
- "rewards/chosen": -0.7273977398872375,
655
- "rewards/margins": 0.501800537109375,
656
- "rewards/rejected": -1.2291982173919678,
657
- "step": 390
658
- },
659
- {
660
- "epoch": 0.4186289900575615,
661
- "grad_norm": 27.84407880505837,
662
- "learning_rate": 3.607600562872785e-07,
663
- "logits/chosen": -0.20401215553283691,
664
- "logits/rejected": -0.21550790965557098,
665
- "logps/chosen": -336.02923583984375,
666
- "logps/rejected": -415.23760986328125,
667
- "loss": 0.5461,
668
- "rewards/accuracies": 0.6625000238418579,
669
- "rewards/chosen": -0.8249620199203491,
670
- "rewards/margins": 0.3732803463935852,
671
- "rewards/rejected": -1.1982421875,
672
- "step": 400
673
  },
674
  {
675
  "epoch": 0.4186289900575615,
676
- "eval_logits/chosen": -0.24119864404201508,
677
- "eval_logits/rejected": -0.010025140829384327,
678
- "eval_logps/chosen": -357.9294738769531,
679
- "eval_logps/rejected": -397.44207763671875,
680
- "eval_loss": 0.5322944521903992,
681
- "eval_rewards/accuracies": 0.738095223903656,
682
- "eval_rewards/chosen": -0.7029505372047424,
683
- "eval_rewards/margins": 0.6538920998573303,
684
- "eval_rewards/rejected": -1.3568426370620728,
685
- "eval_runtime": 230.0486,
686
- "eval_samples_per_second": 8.694,
687
- "eval_steps_per_second": 0.274,
688
- "step": 400
689
- },
690
- {
691
- "epoch": 0.4290947148090005,
692
- "grad_norm": 14.555012611239015,
693
- "learning_rate": 3.5249095128531856e-07,
694
- "logits/chosen": -0.054116807878017426,
695
- "logits/rejected": 0.22516992688179016,
696
- "logps/chosen": -371.7142639160156,
697
- "logps/rejected": -408.3119201660156,
698
- "loss": 0.5431,
699
- "rewards/accuracies": 0.737500011920929,
700
- "rewards/chosen": -0.9307588338851929,
701
- "rewards/margins": 0.6702965497970581,
702
- "rewards/rejected": -1.6010555028915405,
703
- "step": 410
704
  },
705
  {
706
  "epoch": 0.43956043956043955,
707
- "grad_norm": 13.358004942776654,
708
- "learning_rate": 3.4408477372034736e-07,
709
- "logits/chosen": -0.11378423124551773,
710
- "logits/rejected": 0.2778463363647461,
711
- "logps/chosen": -429.87109375,
712
- "logps/rejected": -423.0447692871094,
713
- "loss": 0.5372,
714
- "rewards/accuracies": 0.699999988079071,
715
- "rewards/chosen": -1.0181143283843994,
716
- "rewards/margins": 0.6527779698371887,
717
- "rewards/rejected": -1.670892357826233,
718
- "step": 420
719
- },
720
- {
721
- "epoch": 0.4500261643118786,
722
- "grad_norm": 13.83059515121346,
723
- "learning_rate": 3.3555276610977276e-07,
724
- "logits/chosen": 0.1387651413679123,
725
- "logits/rejected": 0.3719004988670349,
726
- "logps/chosen": -305.9029235839844,
727
- "logps/rejected": -406.22869873046875,
728
- "loss": 0.5252,
729
- "rewards/accuracies": 0.7749999761581421,
730
- "rewards/chosen": -0.9015353918075562,
731
- "rewards/margins": 0.7788639664649963,
732
- "rewards/rejected": -1.6803992986679077,
733
- "step": 430
734
  },
735
  {
736
  "epoch": 0.4604918890633176,
737
- "grad_norm": 16.016907625597792,
738
- "learning_rate": 3.269063392575352e-07,
739
- "logits/chosen": 0.27102214097976685,
740
- "logits/rejected": 0.3719123899936676,
741
- "logps/chosen": -328.58782958984375,
742
- "logps/rejected": -388.52410888671875,
743
- "loss": 0.5343,
744
- "rewards/accuracies": 0.675000011920929,
745
- "rewards/chosen": -0.8982616662979126,
746
- "rewards/margins": 0.6498668789863586,
747
- "rewards/rejected": -1.5481284856796265,
748
- "step": 440
749
- },
750
- {
751
- "epoch": 0.47095761381475665,
752
- "grad_norm": 11.748927642696774,
753
- "learning_rate": 3.1815705699316964e-07,
754
- "logits/chosen": 0.14817403256893158,
755
- "logits/rejected": 0.3451501727104187,
756
- "logps/chosen": -369.4537658691406,
757
- "logps/rejected": -422.6024475097656,
758
- "loss": 0.5277,
759
- "rewards/accuracies": 0.762499988079071,
760
- "rewards/chosen": -0.9175182580947876,
761
- "rewards/margins": 0.7517377138137817,
762
- "rewards/rejected": -1.6692559719085693,
763
- "step": 450
764
  },
765
  {
766
  "epoch": 0.48142333856619574,
767
- "grad_norm": 12.97699616095586,
768
- "learning_rate": 3.0931662070620794e-07,
769
- "logits/chosen": -0.02564959228038788,
770
- "logits/rejected": 0.29681330919265747,
771
- "logps/chosen": -292.01348876953125,
772
- "logps/rejected": -352.1918029785156,
773
- "loss": 0.5503,
774
- "rewards/accuracies": 0.7875000238418579,
775
- "rewards/chosen": -0.6568984389305115,
776
- "rewards/margins": 0.8044195175170898,
777
- "rewards/rejected": -1.461318016052246,
778
- "step": 460
779
- },
780
- {
781
- "epoch": 0.49188906331763477,
782
- "grad_norm": 13.884696801765326,
783
- "learning_rate": 3.003968536966078e-07,
784
- "logits/chosen": 0.07226122915744781,
785
- "logits/rejected": 0.24507398903369904,
786
- "logps/chosen": -342.1966247558594,
787
- "logps/rejected": -380.1782531738281,
788
- "loss": 0.4913,
789
- "rewards/accuracies": 0.7250000238418579,
790
- "rewards/chosen": -0.6603950262069702,
791
- "rewards/margins": 0.6150242686271667,
792
- "rewards/rejected": -1.2754193544387817,
793
- "step": 470
794
  },
795
  {
796
  "epoch": 0.5023547880690737,
797
- "grad_norm": 16.314360967355586,
798
- "learning_rate": 2.9140968536213693e-07,
799
- "logits/chosen": 0.1461777538061142,
800
- "logits/rejected": 0.5193515419960022,
801
- "logps/chosen": -365.03143310546875,
802
- "logps/rejected": -450.1629333496094,
803
- "loss": 0.5062,
804
- "rewards/accuracies": 0.7875000238418579,
805
- "rewards/chosen": -0.841938853263855,
806
- "rewards/margins": 0.9499468803405762,
807
- "rewards/rejected": -1.7918857336044312,
808
- "step": 480
809
- },
810
- {
811
- "epoch": 0.5128205128205128,
812
- "grad_norm": 17.255358445672606,
813
- "learning_rate": 2.823671352438608e-07,
814
- "logits/chosen": 0.15712898969650269,
815
- "logits/rejected": 0.6440854072570801,
816
- "logps/chosen": -392.81866455078125,
817
- "logps/rejected": -430.689208984375,
818
- "loss": 0.5112,
819
- "rewards/accuracies": 0.8125,
820
- "rewards/chosen": -0.8158360719680786,
821
- "rewards/margins": 0.96565181016922,
822
- "rewards/rejected": -1.7814878225326538,
823
- "step": 490
824
- },
825
- {
826
- "epoch": 0.5232862375719518,
827
- "grad_norm": 12.135159172710598,
828
- "learning_rate": 2.73281296951072e-07,
829
- "logits/chosen": 0.16401800513267517,
830
- "logits/rejected": 0.4998152256011963,
831
- "logps/chosen": -322.4615783691406,
832
- "logps/rejected": -393.34197998046875,
833
- "loss": 0.5211,
834
- "rewards/accuracies": 0.75,
835
- "rewards/chosen": -0.8438663482666016,
836
- "rewards/margins": 0.9155359268188477,
837
- "rewards/rejected": -1.7594020366668701,
838
- "step": 500
839
  },
840
  {
841
  "epoch": 0.5232862375719518,
842
- "eval_logits/chosen": 0.24328011274337769,
843
- "eval_logits/rejected": 0.5345854759216309,
844
- "eval_logps/chosen": -396.3761901855469,
845
- "eval_logps/rejected": -449.1320495605469,
846
- "eval_loss": 0.5214627981185913,
847
- "eval_rewards/accuracies": 0.7341269850730896,
848
- "eval_rewards/chosen": -1.087417721748352,
849
- "eval_rewards/margins": 0.7863243818283081,
850
- "eval_rewards/rejected": -1.8737419843673706,
851
- "eval_runtime": 226.3881,
852
- "eval_samples_per_second": 8.834,
853
- "eval_steps_per_second": 0.278,
854
- "step": 500
855
- },
856
- {
857
- "epoch": 0.533751962323391,
858
- "grad_norm": 13.608551630251748,
859
- "learning_rate": 2.641643219871597e-07,
860
- "logits/chosen": 0.27356186509132385,
861
- "logits/rejected": 0.4992881417274475,
862
- "logps/chosen": -343.2596435546875,
863
- "logps/rejected": -415.26275634765625,
864
- "loss": 0.4984,
865
- "rewards/accuracies": 0.7749999761581421,
866
- "rewards/chosen": -0.923366367816925,
867
- "rewards/margins": 0.8507606387138367,
868
- "rewards/rejected": -1.7741270065307617,
869
- "step": 510
870
  },
871
  {
872
  "epoch": 0.54421768707483,
873
- "grad_norm": 14.648106425594214,
874
- "learning_rate": 2.550284034980507e-07,
875
- "logits/chosen": 0.24416789412498474,
876
- "logits/rejected": 0.3334105610847473,
877
- "logps/chosen": -345.0733642578125,
878
- "logps/rejected": -388.39544677734375,
879
- "loss": 0.5276,
880
- "rewards/accuracies": 0.6499999761581421,
881
- "rewards/chosen": -0.8896657824516296,
882
- "rewards/margins": 0.5996683835983276,
883
- "rewards/rejected": -1.489334225654602,
884
- "step": 520
885
- },
886
- {
887
- "epoch": 0.554683411826269,
888
- "grad_norm": 18.62263649476354,
889
- "learning_rate": 2.4588575996495794e-07,
890
- "logits/chosen": 0.11800795793533325,
891
- "logits/rejected": 0.39625564217567444,
892
- "logps/chosen": -364.69830322265625,
893
- "logps/rejected": -451.4327697753906,
894
- "loss": 0.5108,
895
- "rewards/accuracies": 0.8125,
896
- "rewards/chosen": -0.6179436445236206,
897
- "rewards/margins": 1.0892958641052246,
898
- "rewards/rejected": -1.7072395086288452,
899
- "step": 530
900
  },
901
  {
902
  "epoch": 0.565149136577708,
903
- "grad_norm": 14.08705064458867,
904
- "learning_rate": 2.367486188632446e-07,
905
- "logits/chosen": 0.32275527715682983,
906
- "logits/rejected": 0.5255266427993774,
907
- "logps/chosen": -370.3330383300781,
908
- "logps/rejected": -402.99365234375,
909
- "loss": 0.5189,
910
- "rewards/accuracies": 0.699999988079071,
911
- "rewards/chosen": -0.8699628114700317,
912
- "rewards/margins": 0.6969233155250549,
913
- "rewards/rejected": -1.5668861865997314,
914
- "step": 540
915
- },
916
- {
917
- "epoch": 0.5756148613291471,
918
- "grad_norm": 17.14042985699126,
919
- "learning_rate": 2.276292003092593e-07,
920
- "logits/chosen": 0.22727811336517334,
921
- "logits/rejected": 0.5902983546257019,
922
- "logps/chosen": -366.0690002441406,
923
- "logps/rejected": -373.06427001953125,
924
- "loss": 0.514,
925
- "rewards/accuracies": 0.637499988079071,
926
- "rewards/chosen": -0.8379491567611694,
927
- "rewards/margins": 0.5861997008323669,
928
- "rewards/rejected": -1.4241489171981812,
929
- "step": 550
930
  },
931
  {
932
  "epoch": 0.5860805860805861,
933
- "grad_norm": 23.49625757400348,
934
- "learning_rate": 2.185397007170141e-07,
935
- "logits/chosen": 0.4517914354801178,
936
- "logits/rejected": 0.6837216019630432,
937
- "logps/chosen": -288.7613525390625,
938
- "logps/rejected": -369.98590087890625,
939
- "loss": 0.5293,
940
- "rewards/accuracies": 0.75,
941
- "rewards/chosen": -0.694735050201416,
942
- "rewards/margins": 0.7958725690841675,
943
- "rewards/rejected": -1.490607500076294,
944
- "step": 560
945
- },
946
- {
947
- "epoch": 0.5965463108320251,
948
- "grad_norm": 17.265899168005756,
949
- "learning_rate": 2.094922764865619e-07,
950
- "logits/chosen": 0.43373093008995056,
951
- "logits/rejected": 0.621323823928833,
952
- "logps/chosen": -347.9602966308594,
953
- "logps/rejected": -448.27166748046875,
954
- "loss": 0.5007,
955
- "rewards/accuracies": 0.737500011920929,
956
- "rewards/chosen": -1.0440847873687744,
957
- "rewards/margins": 0.6639469265937805,
958
- "rewards/rejected": -1.7080316543579102,
959
- "step": 570
960
  },
961
  {
962
  "epoch": 0.6070120355834642,
963
- "grad_norm": 20.271007557729554,
964
- "learning_rate": 2.0049902774588797e-07,
965
- "logits/chosen": 0.3310932517051697,
966
- "logits/rejected": 0.6366420984268188,
967
- "logps/chosen": -326.4140625,
968
- "logps/rejected": -378.14251708984375,
969
- "loss": 0.5319,
970
- "rewards/accuracies": 0.7250000238418579,
971
- "rewards/chosen": -0.8602741956710815,
972
- "rewards/margins": 0.7902485728263855,
973
- "rewards/rejected": -1.6505229473114014,
974
- "step": 580
975
- },
976
- {
977
- "epoch": 0.6174777603349032,
978
- "grad_norm": 14.597787676388892,
979
- "learning_rate": 1.9157198216806238e-07,
980
- "logits/chosen": 0.1060560941696167,
981
- "logits/rejected": 0.4027911126613617,
982
- "logps/chosen": -391.3057861328125,
983
- "logps/rejected": -447.2630310058594,
984
- "loss": 0.5146,
985
- "rewards/accuracies": 0.7250000238418579,
986
- "rewards/chosen": -0.9476065635681152,
987
- "rewards/margins": 0.8027095794677734,
988
- "rewards/rejected": -1.7503160238265991,
989
- "step": 590
990
- },
991
- {
992
- "epoch": 0.6279434850863422,
993
- "grad_norm": 13.72232592119506,
994
- "learning_rate": 1.8272307888529274e-07,
995
- "logits/chosen": 0.25353509187698364,
996
- "logits/rejected": 0.5968682169914246,
997
- "logps/chosen": -425.5113830566406,
998
- "logps/rejected": -419.0316467285156,
999
- "loss": 0.4932,
1000
- "rewards/accuracies": 0.7749999761581421,
1001
- "rewards/chosen": -0.7882476449012756,
1002
- "rewards/margins": 0.640188992023468,
1003
- "rewards/rejected": -1.4284366369247437,
1004
- "step": 600
1005
  },
1006
  {
1007
  "epoch": 0.6279434850863422,
1008
- "eval_logits/chosen": 0.12464320659637451,
1009
- "eval_logits/rejected": 0.42349082231521606,
1010
- "eval_logps/chosen": -360.2087707519531,
1011
- "eval_logps/rejected": -411.3826904296875,
1012
- "eval_loss": 0.5179768800735474,
1013
- "eval_rewards/accuracies": 0.7539682388305664,
1014
- "eval_rewards/chosen": -0.7257437109947205,
1015
- "eval_rewards/margins": 0.7705051302909851,
1016
- "eval_rewards/rejected": -1.4962489604949951,
1017
- "eval_runtime": 228.2238,
1018
- "eval_samples_per_second": 8.763,
1019
- "eval_steps_per_second": 0.276,
1020
- "step": 600
1021
- },
1022
- {
1023
- "epoch": 0.6384092098377813,
1024
- "grad_norm": 16.319608597107443,
1025
- "learning_rate": 1.7396415252139288e-07,
1026
- "logits/chosen": 0.15204815566539764,
1027
- "logits/rejected": 0.42100468277931213,
1028
- "logps/chosen": -377.43377685546875,
1029
- "logps/rejected": -418.968994140625,
1030
- "loss": 0.5296,
1031
- "rewards/accuracies": 0.75,
1032
- "rewards/chosen": -0.7400127649307251,
1033
- "rewards/margins": 0.7931601405143738,
1034
- "rewards/rejected": -1.533172845840454,
1035
- "step": 610
1036
  },
1037
  {
1038
  "epoch": 0.6488749345892203,
1039
- "grad_norm": 14.946981838762534,
1040
- "learning_rate": 1.6530691736402316e-07,
1041
- "logits/chosen": 0.19925448298454285,
1042
- "logits/rejected": 0.5180339813232422,
1043
- "logps/chosen": -383.7248229980469,
1044
- "logps/rejected": -406.205322265625,
1045
- "loss": 0.483,
1046
- "rewards/accuracies": 0.800000011920929,
1047
- "rewards/chosen": -0.8672540783882141,
1048
- "rewards/margins": 0.9261838793754578,
1049
- "rewards/rejected": -1.7934379577636719,
1050
- "step": 620
1051
- },
1052
- {
1053
- "epoch": 0.6593406593406593,
1054
- "grad_norm": 17.582461648265248,
1055
- "learning_rate": 1.5676295169786864e-07,
1056
- "logits/chosen": 0.29265791177749634,
1057
- "logits/rejected": 0.5982542634010315,
1058
- "logps/chosen": -355.6764221191406,
1059
- "logps/rejected": -397.57403564453125,
1060
- "loss": 0.4936,
1061
- "rewards/accuracies": 0.7875000238418579,
1062
- "rewards/chosen": -0.9091702699661255,
1063
- "rewards/margins": 0.7706948518753052,
1064
- "rewards/rejected": -1.6798651218414307,
1065
- "step": 630
1066
  },
1067
  {
1068
  "epoch": 0.6698063840920984,
1069
- "grad_norm": 16.068638596661998,
1070
- "learning_rate": 1.483436823197092e-07,
1071
- "logits/chosen": 0.3285290002822876,
1072
- "logits/rejected": 0.7033597230911255,
1073
- "logps/chosen": -322.0961608886719,
1074
- "logps/rejected": -381.0562438964844,
1075
- "loss": 0.4991,
1076
- "rewards/accuracies": 0.824999988079071,
1077
- "rewards/chosen": -0.8638845682144165,
1078
- "rewards/margins": 0.8404154777526855,
1079
- "rewards/rejected": -1.7043001651763916,
1080
- "step": 640
1081
- },
1082
- {
1083
- "epoch": 0.6802721088435374,
1084
- "grad_norm": 14.977459971087322,
1085
- "learning_rate": 1.4006036925609243e-07,
1086
- "logits/chosen": 0.31715768575668335,
1087
- "logits/rejected": 0.4414668679237366,
1088
- "logps/chosen": -351.3278503417969,
1089
- "logps/rejected": -434.75799560546875,
1090
- "loss": 0.523,
1091
- "rewards/accuracies": 0.75,
1092
- "rewards/chosen": -0.7902597188949585,
1093
- "rewards/margins": 0.7662769556045532,
1094
- "rewards/rejected": -1.5565365552902222,
1095
- "step": 650
1096
  },
1097
  {
1098
  "epoch": 0.6907378335949764,
1099
- "grad_norm": 18.437373509306592,
1100
- "learning_rate": 1.319240907040458e-07,
1101
- "logits/chosen": 0.04035192355513573,
1102
- "logits/rejected": 0.22156131267547607,
1103
- "logps/chosen": -339.9199523925781,
1104
- "logps/rejected": -384.633544921875,
1105
- "loss": 0.512,
1106
- "rewards/accuracies": 0.762499988079071,
1107
- "rewards/chosen": -0.7180854678153992,
1108
- "rewards/margins": 0.6694477200508118,
1109
- "rewards/rejected": -1.3875333070755005,
1110
- "step": 660
1111
- },
1112
- {
1113
- "epoch": 0.7012035583464155,
1114
- "grad_norm": 18.725340795468888,
1115
- "learning_rate": 1.239457282149695e-07,
1116
- "logits/chosen": 0.2705515921115875,
1117
- "logits/rejected": 0.5743520259857178,
1118
- "logps/chosen": -367.09326171875,
1119
- "logps/rejected": -425.185546875,
1120
- "loss": 0.4988,
1121
- "rewards/accuracies": 0.699999988079071,
1122
- "rewards/chosen": -0.9605787992477417,
1123
- "rewards/margins": 0.8168357610702515,
1124
- "rewards/rejected": -1.7774145603179932,
1125
- "step": 670
1126
  },
1127
  {
1128
  "epoch": 0.7116692830978545,
1129
- "grad_norm": 15.807433822852937,
1130
- "learning_rate": 1.1613595214152711e-07,
1131
- "logits/chosen": 0.4535256326198578,
1132
- "logits/rejected": 0.6553749442100525,
1133
- "logps/chosen": -414.3898010253906,
1134
- "logps/rejected": -449.7381286621094,
1135
- "loss": 0.5116,
1136
- "rewards/accuracies": 0.6625000238418579,
1137
- "rewards/chosen": -1.0415313243865967,
1138
- "rewards/margins": 0.6825998425483704,
1139
- "rewards/rejected": -1.7241313457489014,
1140
- "step": 680
1141
- },
1142
- {
1143
- "epoch": 0.7221350078492935,
1144
- "grad_norm": 12.622124400991629,
1145
- "learning_rate": 1.0850520736699362e-07,
1146
- "logits/chosen": 0.19162841141223907,
1147
- "logits/rejected": 0.6098980903625488,
1148
- "logps/chosen": -341.79766845703125,
1149
- "logps/rejected": -394.4463806152344,
1150
- "loss": 0.4674,
1151
- "rewards/accuracies": 0.8125,
1152
- "rewards/chosen": -0.813568115234375,
1153
- "rewards/margins": 0.8326117396354675,
1154
- "rewards/rejected": -1.6461797952651978,
1155
- "step": 690
1156
- },
1157
- {
1158
- "epoch": 0.7326007326007326,
1159
- "grad_norm": 14.178960044408889,
1160
- "learning_rate": 1.0106369933615042e-07,
1161
- "logits/chosen": 0.4778602123260498,
1162
- "logits/rejected": 0.7655819654464722,
1163
- "logps/chosen": -350.98883056640625,
1164
- "logps/rejected": -405.68756103515625,
1165
- "loss": 0.4891,
1166
- "rewards/accuracies": 0.824999988079071,
1167
- "rewards/chosen": -0.9857735633850098,
1168
- "rewards/margins": 0.8198670148849487,
1169
- "rewards/rejected": -1.805640459060669,
1170
- "step": 700
1171
  },
1172
  {
1173
  "epoch": 0.7326007326007326,
1174
- "eval_logits/chosen": 0.3793373703956604,
1175
- "eval_logits/rejected": 0.7265903353691101,
1176
- "eval_logps/chosen": -383.8190002441406,
1177
- "eval_logps/rejected": -441.8805847167969,
1178
- "eval_loss": 0.5096952319145203,
1179
- "eval_rewards/accuracies": 0.7579365372657776,
1180
- "eval_rewards/chosen": -0.9618459939956665,
1181
- "eval_rewards/margins": 0.839381217956543,
1182
- "eval_rewards/rejected": -1.801227331161499,
1183
- "eval_runtime": 227.351,
1184
- "eval_samples_per_second": 8.797,
1185
- "eval_steps_per_second": 0.277,
1186
- "step": 700
1187
- },
1188
- {
1189
- "epoch": 0.7430664573521716,
1190
- "grad_norm": 19.16673815297702,
1191
- "learning_rate": 9.382138040640714e-08,
1192
- "logits/chosen": 0.48337316513061523,
1193
- "logits/rejected": 0.8561640977859497,
1194
- "logps/chosen": -375.72723388671875,
1195
- "logps/rejected": -441.17657470703125,
1196
- "loss": 0.5328,
1197
- "rewards/accuracies": 0.7749999761581421,
1198
- "rewards/chosen": -1.00464928150177,
1199
- "rewards/margins": 0.9693723917007446,
1200
- "rewards/rejected": -1.9740216732025146,
1201
- "step": 710
1202
  },
1203
  {
1204
  "epoch": 0.7535321821036107,
1205
- "grad_norm": 15.335356540441524,
1206
- "learning_rate": 8.678793653740632e-08,
1207
- "logits/chosen": 0.19264373183250427,
1208
- "logits/rejected": 0.5776667594909668,
1209
- "logps/chosen": -418.45806884765625,
1210
- "logps/rejected": -453.5069274902344,
1211
- "loss": 0.4996,
1212
- "rewards/accuracies": 0.7749999761581421,
1213
- "rewards/chosen": -0.7923117876052856,
1214
- "rewards/margins": 0.863797664642334,
1215
- "rewards/rejected": -1.6561095714569092,
1216
- "step": 720
1217
- },
1218
- {
1219
- "epoch": 0.7639979068550498,
1220
- "grad_norm": 19.284158091087093,
1221
- "learning_rate": 7.997277433690983e-08,
1222
- "logits/chosen": 0.3528473973274231,
1223
- "logits/rejected": 0.6614034175872803,
1224
- "logps/chosen": -321.63165283203125,
1225
- "logps/rejected": -384.52557373046875,
1226
- "loss": 0.5141,
1227
- "rewards/accuracies": 0.737500011920929,
1228
- "rewards/chosen": -0.8356879949569702,
1229
- "rewards/margins": 0.7253657579421997,
1230
- "rewards/rejected": -1.5610538721084595,
1231
- "step": 730
1232
  },
1233
  {
1234
  "epoch": 0.7744636316064888,
1235
- "grad_norm": 13.11426429788781,
1236
- "learning_rate": 7.338500848029602e-08,
1237
- "logits/chosen": 0.3238237798213959,
1238
- "logits/rejected": 0.7854956388473511,
1239
- "logps/chosen": -352.84112548828125,
1240
- "logps/rejected": -388.8981018066406,
1241
- "loss": 0.5039,
1242
- "rewards/accuracies": 0.75,
1243
- "rewards/chosen": -0.754696786403656,
1244
- "rewards/margins": 0.8794612884521484,
1245
- "rewards/rejected": -1.6341578960418701,
1246
- "step": 740
1247
- },
1248
- {
1249
- "epoch": 0.7849293563579278,
1250
- "grad_norm": 13.51761512540701,
1251
- "learning_rate": 6.70334495204884e-08,
1252
- "logits/chosen": 0.6912227869033813,
1253
- "logits/rejected": 0.9092944264411926,
1254
- "logps/chosen": -344.513916015625,
1255
- "logps/rejected": -462.43536376953125,
1256
- "loss": 0.5054,
1257
- "rewards/accuracies": 0.75,
1258
- "rewards/chosen": -1.0211327075958252,
1259
- "rewards/margins": 1.0520156621932983,
1260
- "rewards/rejected": -2.073148250579834,
1261
- "step": 750
1262
  },
1263
  {
1264
  "epoch": 0.7953950811093669,
1265
- "grad_norm": 13.59497749011639,
1266
- "learning_rate": 6.092659210462231e-08,
1267
- "logits/chosen": 0.677158772945404,
1268
- "logits/rejected": 0.942747950553894,
1269
- "logps/chosen": -345.63848876953125,
1270
- "logps/rejected": -470.269775390625,
1271
- "loss": 0.5058,
1272
- "rewards/accuracies": 0.762499988079071,
1273
- "rewards/chosen": -1.0826094150543213,
1274
- "rewards/margins": 1.0310587882995605,
1275
- "rewards/rejected": -2.113668203353882,
1276
- "step": 760
1277
- },
1278
- {
1279
- "epoch": 0.8058608058608059,
1280
- "grad_norm": 11.18434909589235,
1281
- "learning_rate": 5.507260361320737e-08,
1282
- "logits/chosen": 0.4663829207420349,
1283
- "logits/rejected": 0.6142369508743286,
1284
- "logps/chosen": -447.09613037109375,
1285
- "logps/rejected": -531.8663330078125,
1286
- "loss": 0.4845,
1287
- "rewards/accuracies": 0.7250000238418579,
1288
- "rewards/chosen": -1.1781532764434814,
1289
- "rewards/margins": 0.7445544004440308,
1290
- "rewards/rejected": -1.9227077960968018,
1291
- "step": 770
1292
  },
1293
  {
1294
  "epoch": 0.8163265306122449,
1295
- "grad_norm": 20.85850537945954,
1296
- "learning_rate": 4.947931323697982e-08,
1297
- "logits/chosen": 0.3884517252445221,
1298
- "logits/rejected": 0.8345146179199219,
1299
- "logps/chosen": -403.45867919921875,
1300
- "logps/rejected": -429.47064208984375,
1301
- "loss": 0.5072,
1302
- "rewards/accuracies": 0.824999988079071,
1303
- "rewards/chosen": -0.74444979429245,
1304
- "rewards/margins": 1.009735107421875,
1305
- "rewards/rejected": -1.7541850805282593,
1306
- "step": 780
1307
- },
1308
- {
1309
- "epoch": 0.826792255363684,
1310
- "grad_norm": 13.990147261782901,
1311
- "learning_rate": 4.415420150605398e-08,
1312
- "logits/chosen": 0.5786755084991455,
1313
- "logits/rejected": 0.9725602865219116,
1314
- "logps/chosen": -360.92657470703125,
1315
- "logps/rejected": -401.515380859375,
1316
- "loss": 0.5104,
1317
- "rewards/accuracies": 0.737500011920929,
1318
- "rewards/chosen": -0.9632360339164734,
1319
- "rewards/margins": 0.7449162006378174,
1320
- "rewards/rejected": -1.708152174949646,
1321
- "step": 790
1322
- },
1323
- {
1324
- "epoch": 0.837257980115123,
1325
- "grad_norm": 16.235900872693833,
1326
- "learning_rate": 3.9104390285376374e-08,
1327
- "logits/chosen": 0.5984079837799072,
1328
- "logits/rejected": 0.9596022367477417,
1329
- "logps/chosen": -344.7926330566406,
1330
- "logps/rejected": -415.468505859375,
1331
- "loss": 0.5052,
1332
- "rewards/accuracies": 0.75,
1333
- "rewards/chosen": -0.9547444581985474,
1334
- "rewards/margins": 0.812140166759491,
1335
- "rewards/rejected": -1.766884446144104,
1336
- "step": 800
1337
  },
1338
  {
1339
  "epoch": 0.837257980115123,
1340
- "eval_logits/chosen": 0.45483413338661194,
1341
- "eval_logits/rejected": 0.8224205374717712,
1342
- "eval_logps/chosen": -380.42578125,
1343
- "eval_logps/rejected": -441.0578308105469,
1344
- "eval_loss": 0.5066840052604675,
1345
- "eval_rewards/accuracies": 0.7539682388305664,
1346
- "eval_rewards/chosen": -0.9279137253761292,
1347
- "eval_rewards/margins": 0.8650867342948914,
1348
- "eval_rewards/rejected": -1.7930004596710205,
1349
- "eval_runtime": 229.2471,
1350
- "eval_samples_per_second": 8.724,
1351
- "eval_steps_per_second": 0.275,
1352
- "step": 800
1353
- },
1354
- {
1355
- "epoch": 0.847723704866562,
1356
- "grad_norm": 12.324929368398774,
1357
- "learning_rate": 3.433663324986208e-08,
1358
- "logits/chosen": 0.3393861651420593,
1359
- "logits/rejected": 0.9386290311813354,
1360
- "logps/chosen": -431.0074157714844,
1361
- "logps/rejected": -448.7564392089844,
1362
- "loss": 0.5038,
1363
- "rewards/accuracies": 0.762499988079071,
1364
- "rewards/chosen": -1.036725640296936,
1365
- "rewards/margins": 0.8281686902046204,
1366
- "rewards/rejected": -1.8648942708969116,
1367
- "step": 810
1368
  },
1369
  {
1370
  "epoch": 0.858189429618001,
1371
- "grad_norm": 18.594511256107943,
1372
- "learning_rate": 2.9857306851953897e-08,
1373
- "logits/chosen": 0.40348076820373535,
1374
- "logits/rejected": 0.633434534072876,
1375
- "logps/chosen": -367.580810546875,
1376
- "logps/rejected": -448.944580078125,
1377
- "loss": 0.4981,
1378
- "rewards/accuracies": 0.7875000238418579,
1379
- "rewards/chosen": -0.8985809087753296,
1380
- "rewards/margins": 1.0237659215927124,
1381
- "rewards/rejected": -1.922347068786621,
1382
- "step": 820
1383
- },
1384
- {
1385
- "epoch": 0.8686551543694401,
1386
- "grad_norm": 13.517371830628358,
1387
- "learning_rate": 2.567240179368185e-08,
1388
- "logits/chosen": 0.6697582006454468,
1389
- "logits/rejected": 0.8797575235366821,
1390
- "logps/chosen": -338.66162109375,
1391
- "logps/rejected": -448.2554626464844,
1392
- "loss": 0.505,
1393
- "rewards/accuracies": 0.75,
1394
- "rewards/chosen": -1.0065336227416992,
1395
- "rewards/margins": 0.8832134008407593,
1396
- "rewards/rejected": -1.889746904373169,
1397
- "step": 830
1398
  },
1399
  {
1400
  "epoch": 0.8791208791208791,
1401
- "grad_norm": 11.944300741165499,
1402
- "learning_rate": 2.1787515014630357e-08,
1403
- "logits/chosen": 0.6317728757858276,
1404
- "logits/rejected": 0.991460919380188,
1405
- "logps/chosen": -384.9150085449219,
1406
- "logps/rejected": -464.59326171875,
1407
- "loss": 0.4855,
1408
- "rewards/accuracies": 0.737500011920929,
1409
- "rewards/chosen": -1.089877724647522,
1410
- "rewards/margins": 0.8564338684082031,
1411
- "rewards/rejected": -1.946311593055725,
1412
- "step": 840
1413
- },
1414
- {
1415
- "epoch": 0.8895866038723181,
1416
- "grad_norm": 16.102405155905764,
1417
- "learning_rate": 1.820784220652766e-08,
1418
- "logits/chosen": 0.6404603719711304,
1419
- "logits/rejected": 0.9315497279167175,
1420
- "logps/chosen": -365.2972106933594,
1421
- "logps/rejected": -447.9342346191406,
1422
- "loss": 0.474,
1423
- "rewards/accuracies": 0.8125,
1424
- "rewards/chosen": -0.9282188415527344,
1425
- "rewards/margins": 1.0321100950241089,
1426
- "rewards/rejected": -1.9603290557861328,
1427
- "step": 850
1428
  },
1429
  {
1430
  "epoch": 0.9000523286237572,
1431
- "grad_norm": 14.744690323569634,
1432
- "learning_rate": 1.4938170864468636e-08,
1433
- "logits/chosen": 0.46648722887039185,
1434
- "logits/rejected": 0.9038979411125183,
1435
- "logps/chosen": -401.80963134765625,
1436
- "logps/rejected": -458.4877014160156,
1437
- "loss": 0.4834,
1438
- "rewards/accuracies": 0.75,
1439
- "rewards/chosen": -1.0028564929962158,
1440
- "rewards/margins": 0.8283039927482605,
1441
- "rewards/rejected": -1.8311609029769897,
1442
- "step": 860
1443
- },
1444
- {
1445
- "epoch": 0.9105180533751962,
1446
- "grad_norm": 16.179311044599356,
1447
- "learning_rate": 1.1982873884064465e-08,
1448
- "logits/chosen": 0.43730059266090393,
1449
- "logits/rejected": 0.8710809946060181,
1450
- "logps/chosen": -334.9444885253906,
1451
- "logps/rejected": -371.6940612792969,
1452
- "loss": 0.4911,
1453
- "rewards/accuracies": 0.762499988079071,
1454
- "rewards/chosen": -0.9459562301635742,
1455
- "rewards/margins": 0.7860196232795715,
1456
- "rewards/rejected": -1.7319759130477905,
1457
- "step": 870
1458
  },
1459
  {
1460
  "epoch": 0.9209837781266352,
1461
- "grad_norm": 14.166797085920381,
1462
- "learning_rate": 9.345903713082304e-09,
1463
- "logits/chosen": 0.4502927362918854,
1464
- "logits/rejected": 0.6843646764755249,
1465
- "logps/chosen": -367.3271179199219,
1466
- "logps/rejected": -403.81610107421875,
1467
- "loss": 0.5062,
1468
- "rewards/accuracies": 0.675000011920929,
1469
- "rewards/chosen": -0.8845589756965637,
1470
- "rewards/margins": 0.7547686696052551,
1471
- "rewards/rejected": -1.6393276453018188,
1472
- "step": 880
1473
- },
1474
- {
1475
- "epoch": 0.9314495028780743,
1476
- "grad_norm": 13.843768410288419,
1477
- "learning_rate": 7.030787065396865e-09,
1478
- "logits/chosen": 0.6794019937515259,
1479
- "logits/rejected": 1.035869836807251,
1480
- "logps/chosen": -337.71124267578125,
1481
- "logps/rejected": -409.55108642578125,
1482
- "loss": 0.5119,
1483
- "rewards/accuracies": 0.75,
1484
- "rewards/chosen": -0.9029817581176758,
1485
- "rewards/margins": 0.996401309967041,
1486
- "rewards/rejected": -1.8993831872940063,
1487
- "step": 890
1488
- },
1489
- {
1490
- "epoch": 0.9419152276295133,
1491
- "grad_norm": 18.40121909693918,
1492
- "learning_rate": 5.04062020432286e-09,
1493
- "logits/chosen": 0.5905700325965881,
1494
- "logits/rejected": 0.9023973345756531,
1495
- "logps/chosen": -391.4457702636719,
1496
- "logps/rejected": -482.791015625,
1497
- "loss": 0.4946,
1498
- "rewards/accuracies": 0.7749999761581421,
1499
- "rewards/chosen": -1.083058476448059,
1500
- "rewards/margins": 0.8590753674507141,
1501
- "rewards/rejected": -1.9421336650848389,
1502
- "step": 900
1503
  },
1504
  {
1505
  "epoch": 0.9419152276295133,
1506
- "eval_logits/chosen": 0.48986998200416565,
1507
- "eval_logits/rejected": 0.865327775478363,
1508
- "eval_logps/chosen": -382.1980285644531,
1509
- "eval_logps/rejected": -444.3302307128906,
1510
- "eval_loss": 0.5059770941734314,
1511
- "eval_rewards/accuracies": 0.7579365372657776,
1512
- "eval_rewards/chosen": -0.945636510848999,
1513
- "eval_rewards/margins": 0.880087673664093,
1514
- "eval_rewards/rejected": -1.8257242441177368,
1515
- "eval_runtime": 227.0574,
1516
- "eval_samples_per_second": 8.808,
1517
- "eval_steps_per_second": 0.277,
1518
- "step": 900
1519
- },
1520
- {
1521
- "epoch": 0.9523809523809523,
1522
- "grad_norm": 16.77062501745593,
1523
- "learning_rate": 3.3780648016376866e-09,
1524
- "logits/chosen": 0.5361930131912231,
1525
- "logits/rejected": 0.99513179063797,
1526
- "logps/chosen": -326.2569885253906,
1527
- "logps/rejected": -383.8536071777344,
1528
- "loss": 0.5026,
1529
- "rewards/accuracies": 0.800000011920929,
1530
- "rewards/chosen": -0.8974063992500305,
1531
- "rewards/margins": 0.9173014760017395,
1532
- "rewards/rejected": -1.8147079944610596,
1533
- "step": 910
1534
  },
1535
  {
1536
  "epoch": 0.9628466771323915,
1537
- "grad_norm": 26.414125387862228,
1538
- "learning_rate": 2.0453443778310766e-09,
1539
- "logits/chosen": 0.4901656210422516,
1540
- "logits/rejected": 0.9345698356628418,
1541
- "logps/chosen": -391.57965087890625,
1542
- "logps/rejected": -420.29833984375,
1543
- "loss": 0.5179,
1544
- "rewards/accuracies": 0.7749999761581421,
1545
- "rewards/chosen": -0.931963324546814,
1546
- "rewards/margins": 0.8185266256332397,
1547
- "rewards/rejected": -1.7504901885986328,
1548
- "step": 920
1549
- },
1550
- {
1551
- "epoch": 0.9733124018838305,
1552
- "grad_norm": 16.660540365959708,
1553
- "learning_rate": 1.0442413283435758e-09,
1554
- "logits/chosen": 0.4114384651184082,
1555
- "logits/rejected": 0.9512575268745422,
1556
- "logps/chosen": -420.1183166503906,
1557
- "logps/rejected": -420.1231384277344,
1558
- "loss": 0.4762,
1559
- "rewards/accuracies": 0.737500011920929,
1560
- "rewards/chosen": -0.8815310597419739,
1561
- "rewards/margins": 0.7737995386123657,
1562
- "rewards/rejected": -1.6553304195404053,
1563
- "step": 930
1564
  },
1565
  {
1566
  "epoch": 0.9837781266352695,
1567
- "grad_norm": 15.455558961505814,
1568
- "learning_rate": 3.760945397705828e-10,
1569
- "logits/chosen": 0.6433460116386414,
1570
- "logits/rejected": 0.7288548350334167,
1571
- "logps/chosen": -335.63751220703125,
1572
- "logps/rejected": -465.32647705078125,
1573
- "loss": 0.4908,
1574
- "rewards/accuracies": 0.675000011920929,
1575
- "rewards/chosen": -0.8627341985702515,
1576
- "rewards/margins": 0.863608181476593,
1577
- "rewards/rejected": -1.7263424396514893,
1578
- "step": 940
1579
- },
1580
- {
1581
- "epoch": 0.9942438513867086,
1582
- "grad_norm": 15.993090751409031,
1583
- "learning_rate": 4.17975992204056e-11,
1584
- "logits/chosen": 0.3498649597167969,
1585
- "logits/rejected": 0.7463272213935852,
1586
- "logps/chosen": -401.7789001464844,
1587
- "logps/rejected": -454.7509765625,
1588
- "loss": 0.4996,
1589
- "rewards/accuracies": 0.75,
1590
- "rewards/chosen": -0.9310733675956726,
1591
- "rewards/margins": 0.8322589993476868,
1592
- "rewards/rejected": -1.7633323669433594,
1593
- "step": 950
1594
  },
1595
  {
1596
- "epoch": 0.9994767137624281,
1597
- "step": 955,
1598
  "total_flos": 0.0,
1599
- "train_loss": 0.5461677596207064,
1600
- "train_runtime": 19976.9989,
1601
- "train_samples_per_second": 3.06,
1602
- "train_steps_per_second": 0.048
1603
  }
1604
  ],
1605
  "logging_steps": 10,
1606
- "max_steps": 955,
1607
  "num_input_tokens_seen": 0,
1608
  "num_train_epochs": 1,
1609
- "save_steps": 100,
1610
  "stateful_callbacks": {
1611
  "TrainerControl": {
1612
  "args": {
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.9984301412872841,
5
+ "eval_steps": 500,
6
+ "global_step": 477,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0020931449502878076,
13
+ "grad_norm": 9.47907268327104,
14
+ "learning_rate": 1.0416666666666666e-08,
15
+ "logits/chosen": -1.230786919593811,
16
+ "logits/rejected": -1.0849545001983643,
17
+ "logps/chosen": -718.8655395507812,
18
+ "logps/rejected": -589.4813842773438,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
23
  "rewards/rejected": 0.0,
24
  "step": 1
25
  },
26
  {
27
  "epoch": 0.020931449502878074,
28
+ "grad_norm": 5.838883380835085,
29
  "learning_rate": 1.0416666666666667e-07,
30
+ "logits/chosen": -1.0577917098999023,
31
+ "logits/rejected": -1.01724374294281,
32
+ "logps/chosen": -573.62109375,
33
+ "logps/rejected": -511.68597412109375,
34
  "loss": 0.6931,
35
+ "rewards/accuracies": 0.4027777910232544,
36
+ "rewards/chosen": -0.0009698549984022975,
37
+ "rewards/margins": 0.0001799526362447068,
38
+ "rewards/rejected": -0.0011498075909912586,
39
+ "step": 10
40
  },
41
  {
42
  "epoch": 0.04186289900575615,
43
+ "grad_norm": 9.301563416534902,
44
  "learning_rate": 2.0833333333333333e-07,
45
+ "logits/chosen": -1.1377227306365967,
46
+ "logits/rejected": -1.0441679954528809,
47
+ "logps/chosen": -653.8050537109375,
48
+ "logps/rejected": -549.6586303710938,
49
+ "loss": 0.693,
50
+ "rewards/accuracies": 0.5375000238418579,
51
+ "rewards/chosen": 0.00193212297745049,
52
+ "rewards/margins": 0.0014183404855430126,
53
+ "rewards/rejected": 0.000513782724738121,
54
+ "step": 20
55
  },
56
  {
57
  "epoch": 0.06279434850863422,
58
+ "grad_norm": 5.900644482679574,
59
  "learning_rate": 3.1249999999999997e-07,
60
+ "logits/chosen": -1.1196696758270264,
61
+ "logits/rejected": -1.0385626554489136,
62
+ "logps/chosen": -570.8602294921875,
63
+ "logps/rejected": -472.9139709472656,
64
+ "loss": 0.6921,
65
+ "rewards/accuracies": 0.5687500238418579,
66
+ "rewards/chosen": 0.0058663589879870415,
67
+ "rewards/margins": 0.0038539301604032516,
68
+ "rewards/rejected": 0.002012429293245077,
69
+ "step": 30
70
  },
71
  {
72
  "epoch": 0.0837257980115123,
73
+ "grad_norm": 5.585370808899751,
74
  "learning_rate": 4.1666666666666667e-07,
75
+ "logits/chosen": -1.0592528581619263,
76
+ "logits/rejected": -1.0062744617462158,
77
+ "logps/chosen": -552.0910034179688,
78
+ "logps/rejected": -498.30499267578125,
79
+ "loss": 0.6915,
80
+ "rewards/accuracies": 0.53125,
81
+ "rewards/chosen": 0.014180588535964489,
82
+ "rewards/margins": 0.004913664422929287,
83
+ "rewards/rejected": 0.009266925975680351,
84
+ "step": 40
85
  },
86
  {
87
  "epoch": 0.10465724751439037,
88
+ "grad_norm": 6.286129409909711,
89
+ "learning_rate": 4.999731868769026e-07,
90
+ "logits/chosen": -1.0563057661056519,
91
+ "logits/rejected": -0.98540198802948,
92
+ "logps/chosen": -586.5548706054688,
93
+ "logps/rejected": -528.5651245117188,
94
+ "loss": 0.6876,
95
+ "rewards/accuracies": 0.543749988079071,
96
+ "rewards/chosen": 0.0280456505715847,
97
+ "rewards/margins": 0.007971471175551414,
98
+ "rewards/rejected": 0.020074181258678436,
99
+ "step": 50
100
  },
101
  {
102
  "epoch": 0.12558869701726844,
103
+ "grad_norm": 5.696071407355868,
104
+ "learning_rate": 4.990353313429303e-07,
105
+ "logits/chosen": -1.0865626335144043,
106
+ "logits/rejected": -1.014655351638794,
107
+ "logps/chosen": -521.775146484375,
108
+ "logps/rejected": -474.01116943359375,
109
+ "loss": 0.6854,
110
+ "rewards/accuracies": 0.5874999761581421,
111
+ "rewards/chosen": 0.04009874910116196,
112
+ "rewards/margins": 0.020078804343938828,
113
+ "rewards/rejected": 0.02001994475722313,
114
+ "step": 60
115
  },
116
  {
117
  "epoch": 0.14652014652014653,
118
+ "grad_norm": 6.263978649081384,
119
+ "learning_rate": 4.967625656594781e-07,
120
+ "logits/chosen": -0.9924041628837585,
121
+ "logits/rejected": -0.9488169550895691,
122
+ "logps/chosen": -592.423828125,
123
+ "logps/rejected": -557.4447021484375,
124
+ "loss": 0.6804,
125
+ "rewards/accuracies": 0.6187499761581421,
126
+ "rewards/chosen": 0.0283757746219635,
127
+ "rewards/margins": 0.02511376515030861,
128
+ "rewards/rejected": 0.0032620106358081102,
129
+ "step": 70
130
  },
131
  {
132
  "epoch": 0.1674515960230246,
133
+ "grad_norm": 5.9963568545865025,
134
+ "learning_rate": 4.93167072587771e-07,
135
+ "logits/chosen": -1.1233012676239014,
136
+ "logits/rejected": -0.9800901412963867,
137
+ "logps/chosen": -684.3011474609375,
138
+ "logps/rejected": -496.43304443359375,
139
+ "loss": 0.6786,
140
+ "rewards/accuracies": 0.612500011920929,
141
+ "rewards/chosen": 0.026425670832395554,
142
+ "rewards/margins": 0.062486790120601654,
143
+ "rewards/rejected": -0.0360611230134964,
144
+ "step": 80
145
  },
146
  {
147
  "epoch": 0.18838304552590268,
148
+ "grad_norm": 5.67396482097591,
149
+ "learning_rate": 4.882681251368548e-07,
150
+ "logits/chosen": -1.0131152868270874,
151
+ "logits/rejected": -0.9575031995773315,
152
+ "logps/chosen": -507.4007873535156,
153
+ "logps/rejected": -481.8441467285156,
154
+ "loss": 0.6724,
155
+ "rewards/accuracies": 0.5874999761581421,
156
+ "rewards/chosen": -0.034101374447345734,
157
+ "rewards/margins": 0.04895142465829849,
158
+ "rewards/rejected": -0.08305280655622482,
159
+ "step": 90
160
  },
161
  {
162
  "epoch": 0.20931449502878074,
163
+ "grad_norm": 5.210406356501676,
164
+ "learning_rate": 4.820919832540181e-07,
165
+ "logits/chosen": -1.0613998174667358,
166
+ "logits/rejected": -0.9598807096481323,
167
+ "logps/chosen": -603.19482421875,
168
+ "logps/rejected": -510.19146728515625,
169
+ "loss": 0.6714,
170
+ "rewards/accuracies": 0.643750011920929,
171
+ "rewards/chosen": -0.06239934638142586,
172
+ "rewards/margins": 0.06763242930173874,
173
+ "rewards/rejected": -0.1300317794084549,
174
+ "step": 100
175
  },
176
  {
177
  "epoch": 0.2302459445316588,
178
+ "grad_norm": 8.578008593723833,
179
+ "learning_rate": 4.7467175306295647e-07,
180
+ "logits/chosen": -1.0369672775268555,
181
+ "logits/rejected": -0.958328127861023,
182
+ "logps/chosen": -594.5142822265625,
183
+ "logps/rejected": -536.4580078125,
184
+ "loss": 0.669,
185
+ "rewards/accuracies": 0.6187499761581421,
186
+ "rewards/chosen": -0.0989411324262619,
187
+ "rewards/margins": 0.05328856781125069,
188
+ "rewards/rejected": -0.1522296965122223,
189
+ "step": 110
190
  },
191
  {
192
  "epoch": 0.25117739403453687,
193
+ "grad_norm": 6.68135423044596,
194
+ "learning_rate": 4.6604720940421207e-07,
195
+ "logits/chosen": -0.9307726621627808,
196
+ "logits/rejected": -0.9031961560249329,
197
+ "logps/chosen": -570.8509521484375,
198
+ "logps/rejected": -533.431396484375,
199
+ "loss": 0.6565,
200
+ "rewards/accuracies": 0.6312500238418579,
201
+ "rewards/chosen": -0.11969427764415741,
202
+ "rewards/margins": 0.08741588145494461,
203
+ "rewards/rejected": -0.20711013674736023,
204
+ "step": 120
205
  },
206
  {
207
  "epoch": 0.272108843537415,
208
+ "grad_norm": 7.467450855587028,
209
+ "learning_rate": 4.5626458262912735e-07,
210
+ "logits/chosen": -0.9687520861625671,
211
+ "logits/rejected": -0.9051095247268677,
212
+ "logps/chosen": -611.1041870117188,
213
+ "logps/rejected": -546.5861206054688,
214
+ "loss": 0.6501,
215
+ "rewards/accuracies": 0.6187499761581421,
216
+ "rewards/chosen": -0.15018528699874878,
217
+ "rewards/margins": 0.07537852972745895,
218
+ "rewards/rejected": -0.22556380927562714,
219
+ "step": 130
220
  },
221
  {
222
  "epoch": 0.29304029304029305,
223
+ "grad_norm": 7.594546525812229,
224
+ "learning_rate": 4.453763107901675e-07,
225
+ "logits/chosen": -0.9907590746879578,
226
+ "logits/rejected": -0.8972301483154297,
227
+ "logps/chosen": -671.3621826171875,
228
+ "logps/rejected": -525.7132568359375,
229
+ "loss": 0.6587,
230
+ "rewards/accuracies": 0.6312500238418579,
231
+ "rewards/chosen": -0.1575426608324051,
232
+ "rewards/margins": 0.13864362239837646,
233
+ "rewards/rejected": -0.29618629813194275,
234
+ "step": 140
235
  },
236
  {
237
  "epoch": 0.3139717425431711,
238
+ "grad_norm": 6.58340261144849,
239
+ "learning_rate": 4.3344075855595097e-07,
240
+ "logits/chosen": -0.9956567883491516,
241
+ "logits/rejected": -0.9212522506713867,
242
+ "logps/chosen": -606.5499877929688,
243
+ "logps/rejected": -520.2662963867188,
244
+ "loss": 0.6647,
245
+ "rewards/accuracies": 0.59375,
246
+ "rewards/chosen": -0.2668375074863434,
247
+ "rewards/margins": 0.08754570782184601,
248
+ "rewards/rejected": -0.3543832004070282,
249
+ "step": 150
250
  },
251
  {
252
  "epoch": 0.3349031920460492,
253
+ "grad_norm": 5.870649591098086,
254
+ "learning_rate": 4.2052190435769554e-07,
255
+ "logits/chosen": -1.033860683441162,
256
+ "logits/rejected": -0.9170506596565247,
257
+ "logps/chosen": -582.2027587890625,
258
+ "logps/rejected": -498.9814453125,
259
+ "loss": 0.6517,
260
+ "rewards/accuracies": 0.5625,
261
+ "rewards/chosen": -0.22115787863731384,
262
+ "rewards/margins": 0.11934684216976166,
263
+ "rewards/rejected": -0.3405047357082367,
264
+ "step": 160
265
  },
266
  {
267
  "epoch": 0.35583464154892724,
268
+ "grad_norm": 13.578782971041285,
269
+ "learning_rate": 4.0668899744407567e-07,
270
+ "logits/chosen": -0.9048398733139038,
271
+ "logits/rejected": -0.8422538638114929,
272
+ "logps/chosen": -563.29296875,
273
+ "logps/rejected": -498.3485412597656,
274
+ "loss": 0.6548,
275
+ "rewards/accuracies": 0.5625,
276
+ "rewards/chosen": -0.24346908926963806,
277
+ "rewards/margins": 0.06627969443798065,
278
+ "rewards/rejected": -0.30974873900413513,
279
+ "step": 170
280
  },
281
  {
282
  "epoch": 0.37676609105180536,
283
+ "grad_norm": 6.214662115474946,
284
+ "learning_rate": 3.920161866827889e-07,
285
+ "logits/chosen": -0.9318671226501465,
286
+ "logits/rejected": -0.8828659057617188,
287
+ "logps/chosen": -550.6251220703125,
288
+ "logps/rejected": -512.2371215820312,
289
+ "loss": 0.6429,
290
+ "rewards/accuracies": 0.6312500238418579,
291
+ "rewards/chosen": -0.344641774892807,
292
+ "rewards/margins": 0.11328216642141342,
293
+ "rewards/rejected": -0.457923948764801,
294
+ "step": 180
295
  },
296
  {
297
  "epoch": 0.3976975405546834,
298
+ "grad_norm": 5.864688178597999,
299
+ "learning_rate": 3.765821230985757e-07,
300
+ "logits/chosen": -0.867976188659668,
301
+ "logits/rejected": -0.8423366546630859,
302
+ "logps/chosen": -533.2942504882812,
303
+ "logps/rejected": -540.0887451171875,
304
+ "loss": 0.648,
305
+ "rewards/accuracies": 0.5687500238418579,
306
+ "rewards/chosen": -0.3320869505405426,
307
+ "rewards/margins": 0.07606077194213867,
308
+ "rewards/rejected": -0.4081477224826813,
309
+ "step": 190
310
  },
311
  {
312
  "epoch": 0.4186289900575615,
313
+ "grad_norm": 6.711360012717955,
314
+ "learning_rate": 3.604695382782159e-07,
315
+ "logits/chosen": -0.8423722982406616,
316
+ "logits/rejected": -0.8294059634208679,
317
+ "logps/chosen": -549.0567016601562,
318
+ "logps/rejected": -547.9012451171875,
319
+ "loss": 0.649,
320
+ "rewards/accuracies": 0.59375,
321
+ "rewards/chosen": -0.40123528242111206,
322
+ "rewards/margins": 0.09933426231145859,
323
+ "rewards/rejected": -0.5005695819854736,
324
+ "step": 200
325
  },
326
  {
327
  "epoch": 0.43956043956043955,
328
+ "grad_norm": 6.225831058229792,
329
+ "learning_rate": 3.4376480090239047e-07,
330
+ "logits/chosen": -0.923250675201416,
331
+ "logits/rejected": -0.7901118397712708,
332
+ "logps/chosen": -675.4610595703125,
333
+ "logps/rejected": -569.81103515625,
334
+ "loss": 0.6509,
335
+ "rewards/accuracies": 0.65625,
336
+ "rewards/chosen": -0.47761064767837524,
337
+ "rewards/margins": 0.19006164371967316,
338
+ "rewards/rejected": -0.6676722764968872,
339
+ "step": 210
340
  },
341
  {
342
  "epoch": 0.4604918890633176,
343
+ "grad_norm": 7.218199069021195,
344
+ "learning_rate": 3.265574537815398e-07,
345
+ "logits/chosen": -0.8173942565917969,
346
+ "logits/rejected": -0.7971839308738708,
347
+ "logps/chosen": -600.4532470703125,
348
+ "logps/rejected": -533.3508911132812,
349
+ "loss": 0.6464,
350
+ "rewards/accuracies": 0.637499988079071,
351
+ "rewards/chosen": -0.5661253929138184,
352
+ "rewards/margins": 0.1143341064453125,
353
+ "rewards/rejected": -0.6804595589637756,
354
+ "step": 220
355
  },
356
  {
357
  "epoch": 0.48142333856619574,
358
+ "grad_norm": 6.578950073834564,
359
+ "learning_rate": 3.0893973387735683e-07,
360
+ "logits/chosen": -0.8965595364570618,
361
+ "logits/rejected": -0.8051449060440063,
362
+ "logps/chosen": -582.8607177734375,
363
+ "logps/rejected": -513.877197265625,
364
+ "loss": 0.6402,
365
+ "rewards/accuracies": 0.65625,
366
+ "rewards/chosen": -0.4713449478149414,
367
+ "rewards/margins": 0.2214008867740631,
368
+ "rewards/rejected": -0.6927456855773926,
369
+ "step": 230
370
  },
371
  {
372
  "epoch": 0.5023547880690737,
373
+ "grad_norm": 28.85016685629195,
374
+ "learning_rate": 2.910060778827554e-07,
375
+ "logits/chosen": -0.8767781257629395,
376
+ "logits/rejected": -0.789887547492981,
377
+ "logps/chosen": -608.2216186523438,
378
+ "logps/rejected": -573.2482299804688,
379
+ "loss": 0.6291,
380
+ "rewards/accuracies": 0.668749988079071,
381
+ "rewards/chosen": -0.34922102093696594,
382
+ "rewards/margins": 0.1948089301586151,
383
+ "rewards/rejected": -0.5440298914909363,
384
+ "step": 240
385
  },
386
  {
387
  "epoch": 0.5232862375719518,
388
+ "grad_norm": 6.578871015983815,
389
+ "learning_rate": 2.7285261601056697e-07,
390
+ "logits/chosen": -0.8761470913887024,
391
+ "logits/rejected": -0.7583174705505371,
392
+ "logps/chosen": -628.8199462890625,
393
+ "logps/rejected": -549.8895263671875,
394
+ "loss": 0.6282,
395
+ "rewards/accuracies": 0.6187499761581421,
396
+ "rewards/chosen": -0.4877597391605377,
397
+ "rewards/margins": 0.20446522533893585,
398
+ "rewards/rejected": -0.6922250390052795,
399
+ "step": 250
400
  },
401
  {
402
  "epoch": 0.54421768707483,
403
+ "grad_norm": 6.800335993585739,
404
+ "learning_rate": 2.5457665670441937e-07,
405
+ "logits/chosen": -0.8889079093933105,
406
+ "logits/rejected": -0.8609440922737122,
407
+ "logps/chosen": -573.7484741210938,
408
+ "logps/rejected": -539.7342529296875,
409
+ "loss": 0.6328,
410
+ "rewards/accuracies": 0.643750011920929,
411
+ "rewards/chosen": -0.5872923135757446,
412
+ "rewards/margins": 0.2269112765789032,
413
+ "rewards/rejected": -0.8142035603523254,
414
+ "step": 260
415
  },
416
  {
417
  "epoch": 0.565149136577708,
418
+ "grad_norm": 5.584142418009703,
419
+ "learning_rate": 2.3627616503391812e-07,
420
+ "logits/chosen": -0.8820476531982422,
421
+ "logits/rejected": -0.8141148686408997,
422
+ "logps/chosen": -666.2066040039062,
423
+ "logps/rejected": -589.15869140625,
424
+ "loss": 0.6345,
425
+ "rewards/accuracies": 0.6937500238418579,
426
+ "rewards/chosen": -0.576594889163971,
427
+ "rewards/margins": 0.2597651183605194,
428
+ "rewards/rejected": -0.836359977722168,
429
+ "step": 270
430
  },
431
  {
432
  "epoch": 0.5860805860805861,
433
+ "grad_norm": 31.838586331839302,
434
+ "learning_rate": 2.1804923757009882e-07,
435
+ "logits/chosen": -0.756884753704071,
436
+ "logits/rejected": -0.6859643459320068,
437
+ "logps/chosen": -635.4517211914062,
438
+ "logps/rejected": -524.60205078125,
439
+ "loss": 0.6503,
440
+ "rewards/accuracies": 0.6187499761581421,
441
+ "rewards/chosen": -0.6004620790481567,
442
+ "rewards/margins": 0.20423462986946106,
443
+ "rewards/rejected": -0.8046967387199402,
444
+ "step": 280
445
  },
446
  {
447
  "epoch": 0.6070120355834642,
448
+ "grad_norm": 5.602557122403262,
449
+ "learning_rate": 1.9999357655598891e-07,
450
+ "logits/chosen": -0.7888242602348328,
451
+ "logits/rejected": -0.7444680333137512,
452
+ "logps/chosen": -548.9427490234375,
453
+ "logps/rejected": -568.7989501953125,
454
+ "loss": 0.6252,
455
+ "rewards/accuracies": 0.5562499761581421,
456
+ "rewards/chosen": -0.608909010887146,
457
+ "rewards/margins": 0.11673343181610107,
458
+ "rewards/rejected": -0.7256424427032471,
459
+ "step": 290
460
  },
461
  {
462
  "epoch": 0.6279434850863422,
463
+ "grad_norm": 6.998318357540905,
464
+ "learning_rate": 1.8220596619089573e-07,
465
+ "logits/chosen": -0.8254985809326172,
466
+ "logits/rejected": -0.7185059785842896,
467
+ "logps/chosen": -687.5657958984375,
468
+ "logps/rejected": -611.7520141601562,
469
+ "loss": 0.6311,
470
+ "rewards/accuracies": 0.6499999761581421,
471
+ "rewards/chosen": -0.43275612592697144,
472
+ "rewards/margins": 0.2093551605939865,
473
+ "rewards/rejected": -0.6421113014221191,
474
+ "step": 300
475
  },
476
  {
477
  "epoch": 0.6488749345892203,
478
+ "grad_norm": 7.220451533721462,
479
+ "learning_rate": 1.647817538357072e-07,
480
+ "logits/chosen": -0.8472205400466919,
481
+ "logits/rejected": -0.7638236880302429,
482
+ "logps/chosen": -684.5982666015625,
483
+ "logps/rejected": -590.0093383789062,
484
+ "loss": 0.6214,
485
+ "rewards/accuracies": 0.6937500238418579,
486
+ "rewards/chosen": -0.48125261068344116,
487
+ "rewards/margins": 0.29475826025009155,
488
+ "rewards/rejected": -0.7760108709335327,
489
+ "step": 310
490
  },
491
  {
492
  "epoch": 0.6698063840920984,
493
+ "grad_norm": 7.210540321962118,
494
+ "learning_rate": 1.478143389201113e-07,
495
+ "logits/chosen": -0.811663031578064,
496
+ "logits/rejected": -0.713984489440918,
497
+ "logps/chosen": -566.2530517578125,
498
+ "logps/rejected": -521.43798828125,
499
+ "loss": 0.6248,
500
+ "rewards/accuracies": 0.612500011920929,
501
+ "rewards/chosen": -0.6007347106933594,
502
+ "rewards/margins": 0.20972177386283875,
503
+ "rewards/rejected": -0.8104564547538757,
504
+ "step": 320
505
  },
506
  {
507
  "epoch": 0.6907378335949764,
508
+ "grad_norm": 7.764012314675009,
509
+ "learning_rate": 1.3139467229135998e-07,
510
+ "logits/chosen": -0.855695903301239,
511
+ "logits/rejected": -0.8395960927009583,
512
+ "logps/chosen": -602.5452880859375,
513
+ "logps/rejected": -605.7221069335938,
514
+ "loss": 0.6317,
515
+ "rewards/accuracies": 0.65625,
516
+ "rewards/chosen": -0.5449889898300171,
517
+ "rewards/margins": 0.2217901200056076,
518
+ "rewards/rejected": -0.7667790651321411,
519
+ "step": 330
520
  },
521
  {
522
  "epoch": 0.7116692830978545,
523
+ "grad_norm": 6.9322528450090655,
524
+ "learning_rate": 1.1561076868822755e-07,
525
+ "logits/chosen": -0.7998368144035339,
526
+ "logits/rejected": -0.7325125932693481,
527
+ "logps/chosen": -688.9720458984375,
528
+ "logps/rejected": -625.4348754882812,
529
+ "loss": 0.6221,
530
+ "rewards/accuracies": 0.6187499761581421,
531
+ "rewards/chosen": -0.6084362268447876,
532
+ "rewards/margins": 0.19718536734580994,
533
+ "rewards/rejected": -0.8056216239929199,
534
+ "step": 340
535
  },
536
  {
537
  "epoch": 0.7326007326007326,
538
+ "grad_norm": 7.690253261324418,
539
+ "learning_rate": 1.0054723495346482e-07,
540
+ "logits/chosen": -0.8744640350341797,
541
+ "logits/rejected": -0.7735085487365723,
542
+ "logps/chosen": -619.5235595703125,
543
+ "logps/rejected": -542.647216796875,
544
+ "loss": 0.6136,
545
+ "rewards/accuracies": 0.6937500238418579,
546
+ "rewards/chosen": -0.6736162304878235,
547
+ "rewards/margins": 0.2603400647640228,
548
+ "rewards/rejected": -0.9339563250541687,
549
+ "step": 350
550
  },
551
  {
552
  "epoch": 0.7535321821036107,
553
+ "grad_norm": 6.694067556584699,
554
+ "learning_rate": 8.628481651367875e-08,
555
+ "logits/chosen": -0.8514236211776733,
556
+ "logits/rejected": -0.7467787265777588,
557
+ "logps/chosen": -694.3283081054688,
558
+ "logps/rejected": -610.6889038085938,
559
+ "loss": 0.6321,
560
+ "rewards/accuracies": 0.65625,
561
+ "rewards/chosen": -0.6568378210067749,
562
+ "rewards/margins": 0.2652947008609772,
563
+ "rewards/rejected": -0.9221324920654297,
564
+ "step": 360
565
  },
566
  {
567
  "epoch": 0.7744636316064888,
568
+ "grad_norm": 7.38268786256899,
569
+ "learning_rate": 7.289996455765748e-08,
570
+ "logits/chosen": -0.8463318943977356,
571
+ "logits/rejected": -0.7574175000190735,
572
+ "logps/chosen": -625.852294921875,
573
+ "logps/rejected": -545.2384643554688,
574
+ "loss": 0.6305,
575
+ "rewards/accuracies": 0.6499999761581421,
576
+ "rewards/chosen": -0.7398477792739868,
577
+ "rewards/margins": 0.23134395480155945,
578
+ "rewards/rejected": -0.9711917042732239,
579
+ "step": 370
580
  },
581
  {
582
  "epoch": 0.7953950811093669,
583
+ "grad_norm": 5.248799625689845,
584
+ "learning_rate": 6.046442623320145e-08,
585
+ "logits/chosen": -0.7247274518013,
586
+ "logits/rejected": -0.6995854377746582,
587
+ "logps/chosen": -577.6473388671875,
588
+ "logps/rejected": -588.8287353515625,
589
+ "loss": 0.616,
590
+ "rewards/accuracies": 0.65625,
591
+ "rewards/chosen": -0.7117009162902832,
592
+ "rewards/margins": 0.2788507342338562,
593
+ "rewards/rejected": -0.9905516505241394,
594
+ "step": 380
595
  },
596
  {
597
  "epoch": 0.8163265306122449,
598
+ "grad_norm": 7.084262198459761,
599
+ "learning_rate": 4.904486005914027e-08,
600
+ "logits/chosen": -0.829517662525177,
601
+ "logits/rejected": -0.7548889517784119,
602
+ "logps/chosen": -700.9087524414062,
603
+ "logps/rejected": -682.0953369140625,
604
+ "loss": 0.6246,
605
+ "rewards/accuracies": 0.6812499761581421,
606
+ "rewards/chosen": -0.6038817167282104,
607
+ "rewards/margins": 0.30560049414634705,
608
+ "rewards/rejected": -0.9094821810722351,
609
+ "step": 390
610
  },
611
  {
612
  "epoch": 0.837257980115123,
613
+ "grad_norm": 7.913104796663113,
614
+ "learning_rate": 3.8702478614051345e-08,
615
+ "logits/chosen": -0.7607153654098511,
616
+ "logits/rejected": -0.6797865629196167,
617
+ "logps/chosen": -596.0244140625,
618
+ "logps/rejected": -548.5865478515625,
619
+ "loss": 0.6249,
620
+ "rewards/accuracies": 0.7250000238418579,
621
+ "rewards/chosen": -0.6176111698150635,
622
+ "rewards/margins": 0.3056526184082031,
623
+ "rewards/rejected": -0.9232637286186218,
624
+ "step": 400
625
  },
626
  {
627
  "epoch": 0.858189429618001,
628
+ "grad_norm": 7.987115230704847,
629
+ "learning_rate": 2.9492720416985e-08,
630
+ "logits/chosen": -0.9084607362747192,
631
+ "logits/rejected": -0.8099187612533569,
632
+ "logps/chosen": -679.3424682617188,
633
+ "logps/rejected": -576.279541015625,
634
+ "loss": 0.6373,
635
+ "rewards/accuracies": 0.643750011920929,
636
+ "rewards/chosen": -0.6457342505455017,
637
+ "rewards/margins": 0.24207277595996857,
638
+ "rewards/rejected": -0.8878068923950195,
639
+ "step": 410
640
  },
641
  {
642
  "epoch": 0.8791208791208791,
643
+ "grad_norm": 7.681967033914715,
644
+ "learning_rate": 2.1464952759020856e-08,
645
+ "logits/chosen": -0.7410570383071899,
646
+ "logits/rejected": -0.7037540674209595,
647
+ "logps/chosen": -659.6639404296875,
648
+ "logps/rejected": -604.430908203125,
649
+ "loss": 0.6255,
650
+ "rewards/accuracies": 0.606249988079071,
651
+ "rewards/chosen": -0.7325552105903625,
652
+ "rewards/margins": 0.17839916050434113,
653
+ "rewards/rejected": -0.9109543561935425,
654
+ "step": 420
655
  },
656
  {
657
  "epoch": 0.9000523286237572,
658
+ "grad_norm": 7.1477705897451305,
659
+ "learning_rate": 1.4662207078575684e-08,
660
+ "logits/chosen": -0.7820992469787598,
661
+ "logits/rejected": -0.6892222762107849,
662
+ "logps/chosen": -650.0975952148438,
663
+ "logps/rejected": -599.6932373046875,
664
+ "loss": 0.6156,
665
+ "rewards/accuracies": 0.6312500238418579,
666
+ "rewards/chosen": -0.7235848307609558,
667
+ "rewards/margins": 0.22484640777111053,
668
+ "rewards/rejected": -0.9484313130378723,
669
+ "step": 430
670
  },
671
  {
672
  "epoch": 0.9209837781266352,
673
+ "grad_norm": 6.669081243461517,
674
+ "learning_rate": 9.12094829893642e-09,
675
+ "logits/chosen": -0.8992211222648621,
676
+ "logits/rejected": -0.8180851936340332,
677
+ "logps/chosen": -610.1431274414062,
678
+ "logps/rejected": -536.3157958984375,
679
+ "loss": 0.6183,
680
+ "rewards/accuracies": 0.5874999761581421,
681
+ "rewards/chosen": -0.6954323649406433,
682
+ "rewards/margins": 0.1654001623392105,
683
+ "rewards/rejected": -0.8608325719833374,
684
+ "step": 440
685
  },
686
  {
687
  "epoch": 0.9419152276295133,
688
+ "grad_norm": 6.335558071697887,
689
+ "learning_rate": 4.8708793644441086e-09,
690
+ "logits/chosen": -0.7162154912948608,
691
+ "logits/rejected": -0.6556223034858704,
692
+ "logps/chosen": -621.4244995117188,
693
+ "logps/rejected": -581.2723388671875,
694
+ "loss": 0.6264,
695
+ "rewards/accuracies": 0.637499988079071,
696
+ "rewards/chosen": -0.6589775681495667,
697
+ "rewards/margins": 0.23211708664894104,
698
+ "rewards/rejected": -0.8910946846008301,
699
+ "step": 450
700
  },
701
  {
702
  "epoch": 0.9628466771323915,
703
+ "grad_norm": 7.739665745197065,
704
+ "learning_rate": 1.9347820230782295e-09,
705
+ "logits/chosen": -0.8232609033584595,
706
+ "logits/rejected": -0.709001898765564,
707
+ "logps/chosen": -619.5667724609375,
708
+ "logps/rejected": -542.3757934570312,
709
+ "loss": 0.6258,
710
+ "rewards/accuracies": 0.6812499761581421,
711
+ "rewards/chosen": -0.7047165632247925,
712
+ "rewards/margins": 0.2661026418209076,
713
+ "rewards/rejected": -0.9708192944526672,
714
+ "step": 460
715
  },
716
  {
717
  "epoch": 0.9837781266352695,
718
+ "grad_norm": 8.112040008506881,
719
+ "learning_rate": 3.2839470889836627e-10,
720
+ "logits/chosen": -0.80314701795578,
721
+ "logits/rejected": -0.7384933233261108,
722
+ "logps/chosen": -649.725830078125,
723
+ "logps/rejected": -612.681884765625,
724
+ "loss": 0.6147,
725
+ "rewards/accuracies": 0.612500011920929,
726
+ "rewards/chosen": -0.6486014127731323,
727
+ "rewards/margins": 0.18189950287342072,
728
+ "rewards/rejected": -0.8305009007453918,
729
+ "step": 470
730
  },
731
  {
732
+ "epoch": 0.9984301412872841,
733
+ "eval_logits/chosen": -0.7554221153259277,
734
+ "eval_logits/rejected": -0.6965439319610596,
735
+ "eval_logps/chosen": -651.883544921875,
736
+ "eval_logps/rejected": -625.4310913085938,
737
+ "eval_loss": 0.6284215450286865,
738
+ "eval_rewards/accuracies": 0.640625,
739
+ "eval_rewards/chosen": -0.6611213684082031,
740
+ "eval_rewards/margins": 0.23633190989494324,
741
+ "eval_rewards/rejected": -0.8974533081054688,
742
+ "eval_runtime": 233.0068,
743
+ "eval_samples_per_second": 8.583,
744
+ "eval_steps_per_second": 0.137,
745
+ "step": 477
746
+ },
747
+ {
748
+ "epoch": 0.9984301412872841,
749
+ "step": 477,
750
  "total_flos": 0.0,
751
+ "train_loss": 0.6453013030238122,
752
+ "train_runtime": 27951.2038,
753
+ "train_samples_per_second": 2.187,
754
+ "train_steps_per_second": 0.017
755
  }
756
  ],
757
  "logging_steps": 10,
758
+ "max_steps": 477,
759
  "num_input_tokens_seen": 0,
760
  "num_train_epochs": 1,
761
+ "save_steps": 500,
762
  "stateful_callbacks": {
763
  "TrainerControl": {
764
  "args": {