{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 100,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-07,
      "logits/chosen": -2.89351749420166,
      "logits/rejected": -2.7752203941345215,
      "logps/chosen": -345.7324523925781,
      "logps/rejected": -319.42047119140625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.125e-06,
      "logits/chosen": -2.8027219772338867,
      "logits/rejected": -2.7471206188201904,
      "logps/chosen": -255.04139709472656,
      "logps/rejected": -252.81085205078125,
      "loss": 0.6928,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": 0.00026138738030567765,
      "rewards/margins": 0.00045877473894506693,
      "rewards/rejected": -0.00019738740229513496,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.989935734988098e-06,
      "logits/chosen": -2.771097183227539,
      "logits/rejected": -2.7165303230285645,
      "logps/chosen": -277.1825256347656,
      "logps/rejected": -257.04290771484375,
      "loss": 0.6888,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.012711542658507824,
      "rewards/margins": 0.009278710931539536,
      "rewards/rejected": 0.003432832658290863,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.721252918243408,
      "logits/rejected": -2.657611846923828,
      "logps/chosen": -274.41973876953125,
      "logps/rejected": -246.46884155273438,
      "loss": 0.6795,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.033503077924251556,
      "rewards/margins": 0.027159536257386208,
      "rewards/rejected": 0.006343540735542774,
      "step": 30
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.646121984004666e-06,
      "logits/chosen": -2.7624173164367676,
      "logits/rejected": -2.654289960861206,
      "logps/chosen": -268.4293212890625,
      "logps/rejected": -251.5210418701172,
      "loss": 0.6695,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.03509635850787163,
      "rewards/margins": 0.057297080755233765,
      "rewards/rejected": -0.022200724110007286,
      "step": 40
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.3069871595684795e-06,
      "logits/chosen": -2.6763150691986084,
      "logits/rejected": -2.6229124069213867,
      "logps/chosen": -274.2480163574219,
      "logps/rejected": -281.6312561035156,
      "loss": 0.6605,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.0026254388503730297,
      "rewards/margins": 0.07845916599035263,
      "rewards/rejected": -0.08108460903167725,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.8772424536302565e-06,
      "logits/chosen": -2.6876912117004395,
      "logits/rejected": -2.615192413330078,
      "logps/chosen": -287.15826416015625,
      "logps/rejected": -274.56610107421875,
      "loss": 0.6506,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.033494334667921066,
      "rewards/margins": 0.11426861584186554,
      "rewards/rejected": -0.1477629393339157,
      "step": 60
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.3784370602033572e-06,
      "logits/chosen": -2.626436471939087,
      "logits/rejected": -2.5704116821289062,
      "logps/chosen": -304.19744873046875,
      "logps/rejected": -293.0374450683594,
      "loss": 0.6431,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": -0.03456612303853035,
      "rewards/margins": 0.1283847689628601,
      "rewards/rejected": -0.16295088827610016,
      "step": 70
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.835583164544139e-06,
      "logits/chosen": -2.6321258544921875,
      "logits/rejected": -2.5355112552642822,
      "logps/chosen": -302.4496765136719,
      "logps/rejected": -276.21484375,
      "loss": 0.6341,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.04431954026222229,
      "rewards/margins": 0.14259546995162964,
      "rewards/rejected": -0.18691501021385193,
      "step": 80
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.2759017277414165e-06,
      "logits/chosen": -2.5837318897247314,
      "logits/rejected": -2.5942935943603516,
      "logps/chosen": -292.6695861816406,
      "logps/rejected": -311.11724853515625,
      "loss": 0.625,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.09852292388677597,
      "rewards/margins": 0.17917943000793457,
      "rewards/rejected": -0.27770236134529114,
      "step": 90
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.7274575140626318e-06,
      "logits/chosen": -2.6302809715270996,
      "logits/rejected": -2.5902750492095947,
      "logps/chosen": -290.39288330078125,
      "logps/rejected": -273.9955749511719,
      "loss": 0.6297,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.12990322709083557,
      "rewards/margins": 0.16275252401828766,
      "rewards/rejected": -0.2926557660102844,
      "step": 100
    },
    {
      "epoch": 0.64,
      "eval_logits/chosen": -2.6242196559906006,
      "eval_logits/rejected": -2.5397887229919434,
      "eval_logps/chosen": -295.551025390625,
      "eval_logps/rejected": -286.3282165527344,
      "eval_loss": 0.6285393834114075,
      "eval_rewards/accuracies": 0.6880000233650208,
      "eval_rewards/chosen": -0.11511250585317612,
      "eval_rewards/margins": 0.17301124334335327,
      "eval_rewards/rejected": -0.2881237268447876,
      "eval_runtime": 383.7826,
      "eval_samples_per_second": 5.211,
      "eval_steps_per_second": 0.651,
      "step": 100
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.217751806485235e-06,
      "logits/chosen": -2.632079601287842,
      "logits/rejected": -2.526585817337036,
      "logps/chosen": -289.1620178222656,
      "logps/rejected": -275.42742919921875,
      "loss": 0.6179,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.06891096383333206,
      "rewards/margins": 0.20267972350120544,
      "rewards/rejected": -0.2715906798839569,
      "step": 110
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.723433775328385e-07,
      "logits/chosen": -2.5513744354248047,
      "logits/rejected": -2.548856496810913,
      "logps/chosen": -273.984619140625,
      "logps/rejected": -288.4661865234375,
      "loss": 0.6253,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.10123135894536972,
      "rewards/margins": 0.16150596737861633,
      "rewards/rejected": -0.26273733377456665,
      "step": 120
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.1356686569674344e-07,
      "logits/chosen": -2.609344244003296,
      "logits/rejected": -2.545771837234497,
      "logps/chosen": -291.45574951171875,
      "logps/rejected": -301.5300598144531,
      "loss": 0.6234,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": -0.1096922755241394,
      "rewards/margins": 0.2059115469455719,
      "rewards/rejected": -0.3156037926673889,
      "step": 130
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.59412823400657e-07,
      "logits/chosen": -2.6051926612854004,
      "logits/rejected": -2.4772098064422607,
      "logps/chosen": -313.3153991699219,
      "logps/rejected": -287.3096923828125,
      "loss": 0.6144,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.09460710734128952,
      "rewards/margins": 0.21818473935127258,
      "rewards/rejected": -0.3127918243408203,
      "step": 140
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.262559558016325e-08,
      "logits/chosen": -2.5833957195281982,
      "logits/rejected": -2.4759347438812256,
      "logps/chosen": -300.4779968261719,
      "logps/rejected": -280.8038024902344,
      "loss": 0.6195,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -0.11339084059000015,
      "rewards/margins": 0.22617027163505554,
      "rewards/rejected": -0.3395610749721527,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 156,
      "total_flos": 0.0,
      "train_loss": 0.6447198520868253,
      "train_runtime": 7178.8885,
      "train_samples_per_second": 2.786,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}