li-muyang committed (verified)
Commit 21c0f0f
1 Parent(s): 2ddf98f

Model save
README.md ADDED
@@ -0,0 +1,75 @@
---
library_name: transformers
tags:
- trl
- dpo
- generated_from_trainer
model-index:
- name: zephyr-7b-dpo-full
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# zephyr-7b-dpo-full

This model was trained with DPO from an unspecified base model on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5222
- Rewards/chosen: -0.7529
- Rewards/rejected: -1.5582
- Rewards/accuracies: 0.7695
- Rewards/margins: 0.8053
- Logps/rejected: -419.2669
- Logps/chosen: -340.9086
- Logits/rejected: -2.3048
- Logits/chosen: -2.3305

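As a quick consistency check (assuming the usual TRL DPO logging convention, where the reward margin is the mean gap between chosen and rejected rewards): Rewards/margins ≈ Rewards/chosen − Rewards/rejected = −0.7529 − (−1.5582) ≈ 0.8053, which matches the value reported above.
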
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (see the sketch after this list for how they map onto a TRL config):
- learning_rate: 5e-07
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 2
- total_train_batch_size: 128
- total_eval_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1

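The launcher script is not part of this commit, so the following is only a hedged sketch of how the hyperparameters above might be expressed with TRL's `DPOConfig`/`DPOTrainer`. The base model, preference dataset, and `beta` are placeholders (not recorded in this repository), and the trainer keyword (`processing_class` vs. the older `tokenizer`) depends on the TRL version.

```python
# Hypothetical sketch only: mirrors the hyperparameters listed above with TRL's DPOTrainer.
# Base model, preference dataset, and beta are placeholders, not taken from this commit.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base_model = "your-org/your-sft-model"                 # placeholder reference/SFT model
prefs = load_dataset("your-org/your-preference-data")  # placeholder preference dataset

model = AutoModelForCausalLM.from_pretrained(base_model)
tokenizer = AutoTokenizer.from_pretrained(base_model)

# 8 devices x per-device batch 8 x gradient accumulation 2 = effective train batch 128.
args = DPOConfig(
    output_dir="zephyr-7b-dpo-full",
    learning_rate=5e-7,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    beta=0.1,  # placeholder: the DPO beta is not recorded in this card
)

trainer = DPOTrainer(
    model=model,
    args=args,
    train_dataset=prefs["train"],
    eval_dataset=prefs["test"],
    processing_class=tokenizer,  # use tokenizer=... on older TRL releases
)
trainer.train()
```
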
### Training results

| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
| 0.5781 | 0.2092 | 100 | 0.5993 | -0.5364 | -0.9859 | 0.7148 | 0.4494 | -362.0340 | -319.2645 | -2.6423 | -2.6654 |
| 0.5663 | 0.4184 | 200 | 0.5492 | -0.4361 | -1.0763 | 0.7695 | 0.6401 | -371.0742 | -309.2355 | -2.4241 | -2.4516 |
| 0.5168 | 0.6276 | 300 | 0.5311 | -0.6678 | -1.4065 | 0.7695 | 0.7387 | -404.0972 | -332.3974 | -2.3380 | -2.3637 |
| 0.5231 | 0.8368 | 400 | 0.5222 | -0.7529 | -1.5582 | 0.7695 | 0.8053 | -419.2669 | -340.9086 | -2.3048 | -2.3305 |

### Framework versions

- Transformers 4.45.2
- PyTorch 2.2.2+rocm5.7
- Datasets 3.2.0
- Tokenizers 0.20.3

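To try the saved checkpoint, standard Transformers loading is enough. A minimal sketch follows; the repo id `li-muyang/zephyr-7b-dpo-full` is assumed from the committer and model name above, so substitute the actual hub id or a local path if it differs.

```python
# Minimal usage sketch for the checkpoint saved by this commit (repo id is assumed).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "li-muyang/zephyr-7b-dpo-full"  # assumed; replace with the real repo id or a local path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

prompt = "Explain direct preference optimization in one paragraph."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
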
all_results.json ADDED
@@ -0,0 +1,9 @@
{
  "epoch": 1.0,
  "total_flos": 0.0,
  "train_loss": 0.5580766839462344,
  "train_runtime": 18824.9899,
  "train_samples": 61134,
  "train_samples_per_second": 3.247,
  "train_steps_per_second": 0.025
}
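These throughput figures are internally consistent with the card above (a rough check using the reported totals): 61,134 samples / 18,825 s ≈ 3.25 samples/s, and with an effective batch size of 128 one epoch is 61,134 / 128 ≈ 478 optimizer steps, so 478 / 18,825 s ≈ 0.025 steps/s.
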
generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.45.2"
}
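This generation config only pins the BOS/EOS token ids and the Transformers version. If you want to inspect or override these defaults at load time, they can be read directly; a small sketch, again assuming the repo id used above:

```python
# Sketch: read the generation defaults stored in generation_config.json (repo id assumed).
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("li-muyang/zephyr-7b-dpo-full")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 and 2, per the file above
```
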
runs/Jan06_13-16-22_nid002906/events.out.tfevents.1736141106.nid002906.2124115.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3644045b2f43e4831d74302071c4d9d992290072a3cdc709eb8e148823025afc
- size 38541
+ oid sha256:5ca5d84c51703761c6d637b92495ecbf5d6efb166af52e59efa2630dd0465fbe
+ size 39229
runs/Jan06_13-16-22_nid002966/events.out.tfevents.1736141105.nid002966.1056806.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87bb4f846957bc47220e349004834d5013a5d12ca862e4ce702108085c4ccaf4
- size 39227
+ oid sha256:696d3d52f2c62b9841583de0048d23fd67c58c37b9264c5c1456ea67d62ee212
+ size 39915
runs/Jan06_13-16-23_nid002964/events.out.tfevents.1736141102.nid002964.2545250.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8eb3d2c973f1ee1176896516b54b503eb952258d47c925158c6fbc014db1c932
- size 37851
+ oid sha256:6632c7af67f8d465be6a286d71d3c45566f9c43729d53269ed92c70e20482fc3
+ size 38539
runs/Jan06_13-16-24_nid002974/events.out.tfevents.1736141107.nid002974.3529558.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d2f2cf4bfb9e0fd7da5813b0924d073e9f02fb43a13fa99ef5cd4dccda3773a6
- size 38539
+ oid sha256:90a40fce66e02781b09f0af8645913729c6bef105308bcad4afd82d3c1c8dc9f
+ size 39227
train_results.json ADDED
@@ -0,0 +1,9 @@
{
  "epoch": 1.0,
  "total_flos": 0.0,
  "train_loss": 0.5580766839462344,
  "train_runtime": 18824.9899,
  "train_samples": 61134,
  "train_samples_per_second": 3.247,
  "train_steps_per_second": 0.025
}
trainer_state.json ADDED
@@ -0,0 +1,826 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 100,
6
+ "global_step": 478,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0020920502092050207,
13
+ "grad_norm": 8.604485188564416,
14
+ "learning_rate": 1.0416666666666666e-08,
15
+ "logits/chosen": -2.5630249977111816,
16
+ "logits/rejected": -2.539490222930908,
17
+ "logps/chosen": -274.9568786621094,
18
+ "logps/rejected": -365.68719482421875,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/rejected": 0.0,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.02092050209205021,
28
+ "grad_norm": 8.543694225837466,
29
+ "learning_rate": 1.0416666666666667e-07,
30
+ "logits/chosen": -2.401860237121582,
31
+ "logits/rejected": -2.379164218902588,
32
+ "logps/chosen": -264.2777099609375,
33
+ "logps/rejected": -250.76095581054688,
34
+ "loss": 0.6932,
35
+ "rewards/accuracies": 0.4375,
36
+ "rewards/chosen": -0.00042521452996879816,
37
+ "rewards/margins": -0.00016496983880642802,
38
+ "rewards/rejected": -0.0002602446766104549,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.04184100418410042,
43
+ "grad_norm": 7.958205676570384,
44
+ "learning_rate": 2.0833333333333333e-07,
45
+ "logits/chosen": -2.464437246322632,
46
+ "logits/rejected": -2.4167773723602295,
47
+ "logps/chosen": -281.3401794433594,
48
+ "logps/rejected": -293.8695373535156,
49
+ "loss": 0.6925,
50
+ "rewards/accuracies": 0.543749988079071,
51
+ "rewards/chosen": 0.0008064938010647893,
52
+ "rewards/margins": 0.0007130112498998642,
53
+ "rewards/rejected": 9.34823983698152e-05,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.06276150627615062,
58
+ "grad_norm": 8.124896842014486,
59
+ "learning_rate": 3.1249999999999997e-07,
60
+ "logits/chosen": -2.4910924434661865,
61
+ "logits/rejected": -2.43115234375,
62
+ "logps/chosen": -303.0611572265625,
63
+ "logps/rejected": -266.1287536621094,
64
+ "loss": 0.69,
65
+ "rewards/accuracies": 0.643750011920929,
66
+ "rewards/chosen": 0.006498756352812052,
67
+ "rewards/margins": 0.005720348563045263,
68
+ "rewards/rejected": 0.0007784081972204149,
69
+ "step": 30
70
+ },
71
+ {
72
+ "epoch": 0.08368200836820083,
73
+ "grad_norm": 8.10957575772329,
74
+ "learning_rate": 4.1666666666666667e-07,
75
+ "logits/chosen": -2.422678232192993,
76
+ "logits/rejected": -2.3961119651794434,
77
+ "logps/chosen": -259.3570861816406,
78
+ "logps/rejected": -244.57296752929688,
79
+ "loss": 0.682,
80
+ "rewards/accuracies": 0.7437499761581421,
81
+ "rewards/chosen": 0.018198654055595398,
82
+ "rewards/margins": 0.02975170686841011,
83
+ "rewards/rejected": -0.011553054675459862,
84
+ "step": 40
85
+ },
86
+ {
87
+ "epoch": 0.10460251046025104,
88
+ "grad_norm": 8.312437995674673,
89
+ "learning_rate": 4.999733114418725e-07,
90
+ "logits/chosen": -2.3975491523742676,
91
+ "logits/rejected": -2.361694097518921,
92
+ "logps/chosen": -277.6006774902344,
93
+ "logps/rejected": -291.5801086425781,
94
+ "loss": 0.6683,
95
+ "rewards/accuracies": 0.7250000238418579,
96
+ "rewards/chosen": -0.0014305987861007452,
97
+ "rewards/margins": 0.044913023710250854,
98
+ "rewards/rejected": -0.046343620866537094,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.12552301255230125,
103
+ "grad_norm": 9.936414562170402,
104
+ "learning_rate": 4.990398100856366e-07,
105
+ "logits/chosen": -2.511120080947876,
106
+ "logits/rejected": -2.4669294357299805,
107
+ "logps/chosen": -279.3330383300781,
108
+ "logps/rejected": -312.00738525390625,
109
+ "loss": 0.6507,
110
+ "rewards/accuracies": 0.71875,
111
+ "rewards/chosen": -0.037127092480659485,
112
+ "rewards/margins": 0.09578964114189148,
113
+ "rewards/rejected": -0.13291671872138977,
114
+ "step": 60
115
+ },
116
+ {
117
+ "epoch": 0.14644351464435146,
118
+ "grad_norm": 10.22817291801341,
119
+ "learning_rate": 4.967775735898179e-07,
120
+ "logits/chosen": -2.4898154735565186,
121
+ "logits/rejected": -2.4944119453430176,
122
+ "logps/chosen": -275.068603515625,
123
+ "logps/rejected": -286.89044189453125,
124
+ "loss": 0.6243,
125
+ "rewards/accuracies": 0.7875000238418579,
126
+ "rewards/chosen": -0.08297814428806305,
127
+ "rewards/margins": 0.19407641887664795,
128
+ "rewards/rejected": -0.2770545482635498,
129
+ "step": 70
130
+ },
131
+ {
132
+ "epoch": 0.16736401673640167,
133
+ "grad_norm": 15.428530845210528,
134
+ "learning_rate": 4.931986719649298e-07,
135
+ "logits/chosen": -2.669694185256958,
136
+ "logits/rejected": -2.6287219524383545,
137
+ "logps/chosen": -357.6214904785156,
138
+ "logps/rejected": -320.73419189453125,
139
+ "loss": 0.605,
140
+ "rewards/accuracies": 0.643750011920929,
141
+ "rewards/chosen": -0.30702000856399536,
142
+ "rewards/margins": 0.24188637733459473,
143
+ "rewards/rejected": -0.5489063858985901,
144
+ "step": 80
145
+ },
146
+ {
147
+ "epoch": 0.18828451882845187,
148
+ "grad_norm": 24.079252908954956,
149
+ "learning_rate": 4.883222001996351e-07,
150
+ "logits/chosen": -2.6210248470306396,
151
+ "logits/rejected": -2.5822036266326904,
152
+ "logps/chosen": -319.667724609375,
153
+ "logps/rejected": -362.547607421875,
154
+ "loss": 0.5726,
155
+ "rewards/accuracies": 0.762499988079071,
156
+ "rewards/chosen": -0.5919733643531799,
157
+ "rewards/margins": 0.5127191543579102,
158
+ "rewards/rejected": -1.1046924591064453,
159
+ "step": 90
160
+ },
161
+ {
162
+ "epoch": 0.20920502092050208,
163
+ "grad_norm": 17.67183789538999,
164
+ "learning_rate": 4.821741763807186e-07,
165
+ "logits/chosen": -2.610281467437744,
166
+ "logits/rejected": -2.5993425846099854,
167
+ "logps/chosen": -317.454345703125,
168
+ "logps/rejected": -365.478271484375,
169
+ "loss": 0.5781,
170
+ "rewards/accuracies": 0.737500011920929,
171
+ "rewards/chosen": -0.5818353295326233,
172
+ "rewards/margins": 0.5090855360031128,
173
+ "rewards/rejected": -1.0909208059310913,
174
+ "step": 100
175
+ },
176
+ {
177
+ "epoch": 0.20920502092050208,
178
+ "eval_logits/chosen": -2.66536021232605,
179
+ "eval_logits/rejected": -2.6422924995422363,
180
+ "eval_logps/chosen": -319.2645263671875,
181
+ "eval_logps/rejected": -362.03399658203125,
182
+ "eval_loss": 0.5993078351020813,
183
+ "eval_rewards/accuracies": 0.71484375,
184
+ "eval_rewards/chosen": -0.5364254713058472,
185
+ "eval_rewards/margins": 0.44944244623184204,
186
+ "eval_rewards/rejected": -0.9858679175376892,
187
+ "eval_runtime": 203.9171,
188
+ "eval_samples_per_second": 9.808,
189
+ "eval_steps_per_second": 0.157,
190
+ "step": 100
191
+ },
192
+ {
193
+ "epoch": 0.2301255230125523,
194
+ "grad_norm": 23.558600466562133,
195
+ "learning_rate": 4.747874028753375e-07,
196
+ "logits/chosen": -2.661102294921875,
197
+ "logits/rejected": -2.6200637817382812,
198
+ "logps/chosen": -362.30633544921875,
199
+ "logps/rejected": -355.55291748046875,
200
+ "loss": 0.5922,
201
+ "rewards/accuracies": 0.706250011920929,
202
+ "rewards/chosen": -0.533866822719574,
203
+ "rewards/margins": 0.4231007993221283,
204
+ "rewards/rejected": -0.9569675326347351,
205
+ "step": 110
206
+ },
207
+ {
208
+ "epoch": 0.2510460251046025,
209
+ "grad_norm": 18.490910651986002,
210
+ "learning_rate": 4.662012913161997e-07,
211
+ "logits/chosen": -2.5463078022003174,
212
+ "logits/rejected": -2.5458548069000244,
213
+ "logps/chosen": -342.85552978515625,
214
+ "logps/rejected": -369.1539611816406,
215
+ "loss": 0.5664,
216
+ "rewards/accuracies": 0.762499988079071,
217
+ "rewards/chosen": -0.5949645638465881,
218
+ "rewards/margins": 0.4743884205818176,
219
+ "rewards/rejected": -1.0693528652191162,
220
+ "step": 120
221
+ },
222
+ {
223
+ "epoch": 0.2719665271966527,
224
+ "grad_norm": 25.6202551917518,
225
+ "learning_rate": 4.5646165232345103e-07,
226
+ "logits/chosen": -2.553074598312378,
227
+ "logits/rejected": -2.553410530090332,
228
+ "logps/chosen": -345.36749267578125,
229
+ "logps/rejected": -386.101806640625,
230
+ "loss": 0.5517,
231
+ "rewards/accuracies": 0.7250000238418579,
232
+ "rewards/chosen": -0.6696659922599792,
233
+ "rewards/margins": 0.49819788336753845,
234
+ "rewards/rejected": -1.1678640842437744,
235
+ "step": 130
236
+ },
237
+ {
238
+ "epoch": 0.2928870292887029,
239
+ "grad_norm": 19.49608991833418,
240
+ "learning_rate": 4.456204510851956e-07,
241
+ "logits/chosen": -2.535830020904541,
242
+ "logits/rejected": -2.527318000793457,
243
+ "logps/chosen": -372.49200439453125,
244
+ "logps/rejected": -421.67022705078125,
245
+ "loss": 0.5645,
246
+ "rewards/accuracies": 0.7562500238418579,
247
+ "rewards/chosen": -0.5496624708175659,
248
+ "rewards/margins": 0.6272837519645691,
249
+ "rewards/rejected": -1.1769464015960693,
250
+ "step": 140
251
+ },
252
+ {
253
+ "epoch": 0.3138075313807531,
254
+ "grad_norm": 23.227286383486767,
255
+ "learning_rate": 4.337355301007335e-07,
256
+ "logits/chosen": -2.511842727661133,
257
+ "logits/rejected": -2.4751811027526855,
258
+ "logps/chosen": -347.7061462402344,
259
+ "logps/rejected": -381.6315002441406,
260
+ "loss": 0.5662,
261
+ "rewards/accuracies": 0.7124999761581421,
262
+ "rewards/chosen": -0.637377917766571,
263
+ "rewards/margins": 0.42279720306396484,
264
+ "rewards/rejected": -1.0601751804351807,
265
+ "step": 150
266
+ },
267
+ {
268
+ "epoch": 0.33472803347280333,
269
+ "grad_norm": 23.42041919082547,
270
+ "learning_rate": 4.2087030056579986e-07,
271
+ "logits/chosen": -2.524122953414917,
272
+ "logits/rejected": -2.485301971435547,
273
+ "logps/chosen": -325.80682373046875,
274
+ "logps/rejected": -353.33441162109375,
275
+ "loss": 0.5688,
276
+ "rewards/accuracies": 0.71875,
277
+ "rewards/chosen": -0.48947468400001526,
278
+ "rewards/margins": 0.541519820690155,
279
+ "rewards/rejected": -1.0309945344924927,
280
+ "step": 160
281
+ },
282
+ {
283
+ "epoch": 0.35564853556485354,
284
+ "grad_norm": 23.221895656921156,
285
+ "learning_rate": 4.070934040463998e-07,
286
+ "logits/chosen": -2.465569019317627,
287
+ "logits/rejected": -2.436041831970215,
288
+ "logps/chosen": -311.6986389160156,
289
+ "logps/rejected": -332.34564208984375,
290
+ "loss": 0.558,
291
+ "rewards/accuracies": 0.706250011920929,
292
+ "rewards/chosen": -0.6063622832298279,
293
+ "rewards/margins": 0.40437421202659607,
294
+ "rewards/rejected": -1.0107365846633911,
295
+ "step": 170
296
+ },
297
+ {
298
+ "epoch": 0.37656903765690375,
299
+ "grad_norm": 24.5366094615194,
300
+ "learning_rate": 3.9247834624635404e-07,
301
+ "logits/chosen": -2.411452054977417,
302
+ "logits/rejected": -2.4029018878936768,
303
+ "logps/chosen": -319.69158935546875,
304
+ "logps/rejected": -338.3087463378906,
305
+ "loss": 0.5272,
306
+ "rewards/accuracies": 0.6812499761581421,
307
+ "rewards/chosen": -0.6507824659347534,
308
+ "rewards/margins": 0.46038109064102173,
309
+ "rewards/rejected": -1.11116361618042,
310
+ "step": 180
311
+ },
312
+ {
313
+ "epoch": 0.39748953974895396,
314
+ "grad_norm": 24.5328283472713,
315
+ "learning_rate": 3.7710310482256523e-07,
316
+ "logits/chosen": -2.4384167194366455,
317
+ "logits/rejected": -2.400991201400757,
318
+ "logps/chosen": -342.86517333984375,
319
+ "logps/rejected": -378.6058044433594,
320
+ "loss": 0.5563,
321
+ "rewards/accuracies": 0.7124999761581421,
322
+ "rewards/chosen": -0.6878019571304321,
323
+ "rewards/margins": 0.4584982991218567,
324
+ "rewards/rejected": -1.1463003158569336,
325
+ "step": 190
326
+ },
327
+ {
328
+ "epoch": 0.41841004184100417,
329
+ "grad_norm": 57.338112254541,
330
+ "learning_rate": 3.610497133404795e-07,
331
+ "logits/chosen": -2.356046199798584,
332
+ "logits/rejected": -2.3616952896118164,
333
+ "logps/chosen": -321.7983093261719,
334
+ "logps/rejected": -376.4720458984375,
335
+ "loss": 0.5663,
336
+ "rewards/accuracies": 0.7124999761581421,
337
+ "rewards/chosen": -0.6318550109863281,
338
+ "rewards/margins": 0.6110703349113464,
339
+ "rewards/rejected": -1.2429252862930298,
340
+ "step": 200
341
+ },
342
+ {
343
+ "epoch": 0.41841004184100417,
344
+ "eval_logits/chosen": -2.4516215324401855,
345
+ "eval_logits/rejected": -2.4241268634796143,
346
+ "eval_logps/chosen": -309.2355041503906,
347
+ "eval_logps/rejected": -371.07415771484375,
348
+ "eval_loss": 0.5492264032363892,
349
+ "eval_rewards/accuracies": 0.76953125,
350
+ "eval_rewards/chosen": -0.43613502383232117,
351
+ "eval_rewards/margins": 0.6401345729827881,
352
+ "eval_rewards/rejected": -1.0762696266174316,
353
+ "eval_runtime": 205.029,
354
+ "eval_samples_per_second": 9.755,
355
+ "eval_steps_per_second": 0.156,
356
+ "step": 200
357
+ },
358
+ {
359
+ "epoch": 0.4393305439330544,
360
+ "grad_norm": 21.903963916935044,
361
+ "learning_rate": 3.4440382358952115e-07,
362
+ "logits/chosen": -2.3729984760284424,
363
+ "logits/rejected": -2.3444366455078125,
364
+ "logps/chosen": -340.30096435546875,
365
+ "logps/rejected": -357.1148986816406,
366
+ "loss": 0.5543,
367
+ "rewards/accuracies": 0.7562500238418579,
368
+ "rewards/chosen": -0.4852035939693451,
369
+ "rewards/margins": 0.5747165679931641,
370
+ "rewards/rejected": -1.059920072555542,
371
+ "step": 210
372
+ },
373
+ {
374
+ "epoch": 0.4602510460251046,
375
+ "grad_norm": 21.127118929830733,
376
+ "learning_rate": 3.272542485937368e-07,
377
+ "logits/chosen": -2.3580079078674316,
378
+ "logits/rejected": -2.3387129306793213,
379
+ "logps/chosen": -322.58843994140625,
380
+ "logps/rejected": -360.1854553222656,
381
+ "loss": 0.5452,
382
+ "rewards/accuracies": 0.75,
383
+ "rewards/chosen": -0.596159040927887,
384
+ "rewards/margins": 0.6134015321731567,
385
+ "rewards/rejected": -1.209560513496399,
386
+ "step": 220
387
+ },
388
+ {
389
+ "epoch": 0.4811715481171548,
390
+ "grad_norm": 17.18365128542825,
391
+ "learning_rate": 3.096924887558854e-07,
392
+ "logits/chosen": -2.3685572147369385,
393
+ "logits/rejected": -2.347229480743408,
394
+ "logps/chosen": -317.18878173828125,
395
+ "logps/rejected": -382.9065246582031,
396
+ "loss": 0.5496,
397
+ "rewards/accuracies": 0.7124999761581421,
398
+ "rewards/chosen": -0.6833942532539368,
399
+ "rewards/margins": 0.6639358401298523,
400
+ "rewards/rejected": -1.34732985496521,
401
+ "step": 230
402
+ },
403
+ {
404
+ "epoch": 0.502092050209205,
405
+ "grad_norm": 20.494176489772407,
406
+ "learning_rate": 2.9181224366319943e-07,
407
+ "logits/chosen": -2.4321351051330566,
408
+ "logits/rejected": -2.3992695808410645,
409
+ "logps/chosen": -321.96484375,
410
+ "logps/rejected": -362.93682861328125,
411
+ "loss": 0.5234,
412
+ "rewards/accuracies": 0.7437499761581421,
413
+ "rewards/chosen": -0.5447031259536743,
414
+ "rewards/margins": 0.6238798499107361,
415
+ "rewards/rejected": -1.1685830354690552,
416
+ "step": 240
417
+ },
418
+ {
419
+ "epoch": 0.5230125523012552,
420
+ "grad_norm": 22.525297509363288,
421
+ "learning_rate": 2.7370891215954565e-07,
422
+ "logits/chosen": -2.3660457134246826,
423
+ "logits/rejected": -2.312988042831421,
424
+ "logps/chosen": -365.48583984375,
425
+ "logps/rejected": -400.5924377441406,
426
+ "loss": 0.5288,
427
+ "rewards/accuracies": 0.7437499761581421,
428
+ "rewards/chosen": -0.6344345808029175,
429
+ "rewards/margins": 0.7686706781387329,
430
+ "rewards/rejected": -1.40310537815094,
431
+ "step": 250
432
+ },
433
+ {
434
+ "epoch": 0.5439330543933054,
435
+ "grad_norm": 25.70816950355519,
436
+ "learning_rate": 2.55479083351317e-07,
437
+ "logits/chosen": -2.393095016479492,
438
+ "logits/rejected": -2.378119945526123,
439
+ "logps/chosen": -378.4981994628906,
440
+ "logps/rejected": -401.1968078613281,
441
+ "loss": 0.5314,
442
+ "rewards/accuracies": 0.737500011920929,
443
+ "rewards/chosen": -0.8221562504768372,
444
+ "rewards/margins": 0.6506116390228271,
445
+ "rewards/rejected": -1.4727678298950195,
446
+ "step": 260
447
+ },
448
+ {
449
+ "epoch": 0.5648535564853556,
450
+ "grad_norm": 21.643979097675565,
451
+ "learning_rate": 2.3722002126275822e-07,
452
+ "logits/chosen": -2.3883724212646484,
453
+ "logits/rejected": -2.373289108276367,
454
+ "logps/chosen": -356.40008544921875,
455
+ "logps/rejected": -386.89495849609375,
456
+ "loss": 0.5462,
457
+ "rewards/accuracies": 0.6812499761581421,
458
+ "rewards/chosen": -0.7683958411216736,
459
+ "rewards/margins": 0.5183417797088623,
460
+ "rewards/rejected": -1.2867376804351807,
461
+ "step": 270
462
+ },
463
+ {
464
+ "epoch": 0.5857740585774058,
465
+ "grad_norm": 25.598544455627437,
466
+ "learning_rate": 2.19029145890313e-07,
467
+ "logits/chosen": -2.351996421813965,
468
+ "logits/rejected": -2.315722942352295,
469
+ "logps/chosen": -333.72406005859375,
470
+ "logps/rejected": -375.9908142089844,
471
+ "loss": 0.5446,
472
+ "rewards/accuracies": 0.7250000238418579,
473
+ "rewards/chosen": -0.6726834177970886,
474
+ "rewards/margins": 0.6510350704193115,
475
+ "rewards/rejected": -1.3237184286117554,
476
+ "step": 280
477
+ },
478
+ {
479
+ "epoch": 0.606694560669456,
480
+ "grad_norm": 20.718970444341483,
481
+ "learning_rate": 2.0100351342479216e-07,
482
+ "logits/chosen": -2.3804399967193604,
483
+ "logits/rejected": -2.346060276031494,
484
+ "logps/chosen": -319.09820556640625,
485
+ "logps/rejected": -370.26690673828125,
486
+ "loss": 0.5374,
487
+ "rewards/accuracies": 0.762499988079071,
488
+ "rewards/chosen": -0.682944655418396,
489
+ "rewards/margins": 0.6234767436981201,
490
+ "rewards/rejected": -1.3064215183258057,
491
+ "step": 290
492
+ },
493
+ {
494
+ "epoch": 0.6276150627615062,
495
+ "grad_norm": 21.96416553857481,
496
+ "learning_rate": 1.8323929841460178e-07,
497
+ "logits/chosen": -2.3465282917022705,
498
+ "logits/rejected": -2.3195531368255615,
499
+ "logps/chosen": -381.2027893066406,
500
+ "logps/rejected": -404.70294189453125,
501
+ "loss": 0.5168,
502
+ "rewards/accuracies": 0.737500011920929,
503
+ "rewards/chosen": -0.801205039024353,
504
+ "rewards/margins": 0.6560418605804443,
505
+ "rewards/rejected": -1.457247018814087,
506
+ "step": 300
507
+ },
508
+ {
509
+ "epoch": 0.6276150627615062,
510
+ "eval_logits/chosen": -2.363710880279541,
511
+ "eval_logits/rejected": -2.338043212890625,
512
+ "eval_logps/chosen": -332.39739990234375,
513
+ "eval_logps/rejected": -404.09722900390625,
514
+ "eval_loss": 0.5310549139976501,
515
+ "eval_rewards/accuracies": 0.76953125,
516
+ "eval_rewards/chosen": -0.6677543520927429,
517
+ "eval_rewards/margins": 0.7387461066246033,
518
+ "eval_rewards/rejected": -1.4065003395080566,
519
+ "eval_runtime": 210.0493,
520
+ "eval_samples_per_second": 9.522,
521
+ "eval_steps_per_second": 0.152,
522
+ "step": 300
523
+ },
524
+ {
525
+ "epoch": 0.6485355648535565,
526
+ "grad_norm": 22.802600608827092,
527
+ "learning_rate": 1.6583128063291573e-07,
528
+ "logits/chosen": -2.2692363262176514,
529
+ "logits/rejected": -2.2743899822235107,
530
+ "logps/chosen": -367.472900390625,
531
+ "logps/rejected": -398.7881774902344,
532
+ "loss": 0.5247,
533
+ "rewards/accuracies": 0.7875000238418579,
534
+ "rewards/chosen": -0.7357426881790161,
535
+ "rewards/margins": 0.67784583568573,
536
+ "rewards/rejected": -1.4135886430740356,
537
+ "step": 310
538
+ },
539
+ {
540
+ "epoch": 0.6694560669456067,
541
+ "grad_norm": 23.275926848557994,
542
+ "learning_rate": 1.488723393865766e-07,
543
+ "logits/chosen": -2.3339226245880127,
544
+ "logits/rejected": -2.312654972076416,
545
+ "logps/chosen": -366.4847717285156,
546
+ "logps/rejected": -386.82659912109375,
547
+ "loss": 0.5072,
548
+ "rewards/accuracies": 0.75,
549
+ "rewards/chosen": -0.7132988572120667,
550
+ "rewards/margins": 0.6844549179077148,
551
+ "rewards/rejected": -1.3977539539337158,
552
+ "step": 320
553
+ },
554
+ {
555
+ "epoch": 0.6903765690376569,
556
+ "grad_norm": 22.83277919154358,
557
+ "learning_rate": 1.3245295796480788e-07,
558
+ "logits/chosen": -2.3407974243164062,
559
+ "logits/rejected": -2.3033108711242676,
560
+ "logps/chosen": -341.2934875488281,
561
+ "logps/rejected": -392.6544494628906,
562
+ "loss": 0.5267,
563
+ "rewards/accuracies": 0.675000011920929,
564
+ "rewards/chosen": -0.7261720895767212,
565
+ "rewards/margins": 0.5603376626968384,
566
+ "rewards/rejected": -1.2865098714828491,
567
+ "step": 330
568
+ },
569
+ {
570
+ "epoch": 0.7112970711297071,
571
+ "grad_norm": 27.549468294632515,
572
+ "learning_rate": 1.1666074087171627e-07,
573
+ "logits/chosen": -2.350193500518799,
574
+ "logits/rejected": -2.3031275272369385,
575
+ "logps/chosen": -358.90020751953125,
576
+ "logps/rejected": -430.9949645996094,
577
+ "loss": 0.5197,
578
+ "rewards/accuracies": 0.78125,
579
+ "rewards/chosen": -0.6628645658493042,
580
+ "rewards/margins": 0.8692364692687988,
581
+ "rewards/rejected": -1.5321009159088135,
582
+ "step": 340
583
+ },
584
+ {
585
+ "epoch": 0.7322175732217573,
586
+ "grad_norm": 24.59368890384119,
587
+ "learning_rate": 1.0157994641835734e-07,
588
+ "logits/chosen": -2.3114981651306152,
589
+ "logits/rejected": -2.2836005687713623,
590
+ "logps/chosen": -327.0142517089844,
591
+ "logps/rejected": -381.579833984375,
592
+ "loss": 0.4955,
593
+ "rewards/accuracies": 0.768750011920929,
594
+ "rewards/chosen": -0.6866356134414673,
595
+ "rewards/margins": 0.7695199251174927,
596
+ "rewards/rejected": -1.45615553855896,
597
+ "step": 350
598
+ },
599
+ {
600
+ "epoch": 0.7531380753138075,
601
+ "grad_norm": 23.35925011150768,
602
+ "learning_rate": 8.729103716819111e-08,
603
+ "logits/chosen": -2.351583480834961,
604
+ "logits/rejected": -2.2726166248321533,
605
+ "logps/chosen": -397.0428161621094,
606
+ "logps/rejected": -421.7183532714844,
607
+ "loss": 0.5414,
608
+ "rewards/accuracies": 0.75,
609
+ "rewards/chosen": -0.9053381085395813,
610
+ "rewards/margins": 0.6986960768699646,
611
+ "rewards/rejected": -1.6040340662002563,
612
+ "step": 360
613
+ },
614
+ {
615
+ "epoch": 0.7740585774058577,
616
+ "grad_norm": 21.31130015427156,
617
+ "learning_rate": 7.387025063449081e-08,
618
+ "logits/chosen": -2.2894997596740723,
619
+ "logits/rejected": -2.262648820877075,
620
+ "logps/chosen": -370.5188293457031,
621
+ "logps/rejected": -397.9955139160156,
622
+ "loss": 0.531,
623
+ "rewards/accuracies": 0.71875,
624
+ "rewards/chosen": -0.9584518671035767,
625
+ "rewards/margins": 0.6381843686103821,
626
+ "rewards/rejected": -1.596636176109314,
627
+ "step": 370
628
+ },
629
+ {
630
+ "epoch": 0.7949790794979079,
631
+ "grad_norm": 20.80814381693103,
632
+ "learning_rate": 6.138919252022435e-08,
633
+ "logits/chosen": -2.2165627479553223,
634
+ "logits/rejected": -2.221606731414795,
635
+ "logps/chosen": -339.34637451171875,
636
+ "logps/rejected": -434.6751403808594,
637
+ "loss": 0.5212,
638
+ "rewards/accuracies": 0.7749999761581421,
639
+ "rewards/chosen": -1.0024492740631104,
640
+ "rewards/margins": 0.7857004404067993,
641
+ "rewards/rejected": -1.7881495952606201,
642
+ "step": 380
643
+ },
644
+ {
645
+ "epoch": 0.8158995815899581,
646
+ "grad_norm": 25.031968994288366,
647
+ "learning_rate": 4.991445467064689e-08,
648
+ "logits/chosen": -2.286673069000244,
649
+ "logits/rejected": -2.2558798789978027,
650
+ "logps/chosen": -391.0277404785156,
651
+ "logps/rejected": -440.29052734375,
652
+ "loss": 0.5119,
653
+ "rewards/accuracies": 0.71875,
654
+ "rewards/chosen": -0.8834357261657715,
655
+ "rewards/margins": 0.6738095879554749,
656
+ "rewards/rejected": -1.5572453737258911,
657
+ "step": 390
658
+ },
659
+ {
660
+ "epoch": 0.8368200836820083,
661
+ "grad_norm": 25.476147899426728,
662
+ "learning_rate": 3.9507259776993954e-08,
663
+ "logits/chosen": -2.287022113800049,
664
+ "logits/rejected": -2.24157977104187,
665
+ "logps/chosen": -362.04241943359375,
666
+ "logps/rejected": -430.1278381347656,
667
+ "loss": 0.5231,
668
+ "rewards/accuracies": 0.731249988079071,
669
+ "rewards/chosen": -0.8974603414535522,
670
+ "rewards/margins": 0.7547586560249329,
671
+ "rewards/rejected": -1.6522190570831299,
672
+ "step": 400
673
+ },
674
+ {
675
+ "epoch": 0.8368200836820083,
676
+ "eval_logits/chosen": -2.330451488494873,
677
+ "eval_logits/rejected": -2.304779529571533,
678
+ "eval_logps/chosen": -340.9085693359375,
679
+ "eval_logps/rejected": -419.2669372558594,
680
+ "eval_loss": 0.5222000479698181,
681
+ "eval_rewards/accuracies": 0.76953125,
682
+ "eval_rewards/chosen": -0.7528659105300903,
683
+ "eval_rewards/margins": 0.8053313493728638,
684
+ "eval_rewards/rejected": -1.558197259902954,
685
+ "eval_runtime": 204.9677,
686
+ "eval_samples_per_second": 9.758,
687
+ "eval_steps_per_second": 0.156,
688
+ "step": 400
689
+ },
690
+ {
691
+ "epoch": 0.8577405857740585,
692
+ "grad_norm": 24.371943988476055,
693
+ "learning_rate": 3.022313472693447e-08,
694
+ "logits/chosen": -2.329723358154297,
695
+ "logits/rejected": -2.2943921089172363,
696
+ "logps/chosen": -381.3471374511719,
697
+ "logps/rejected": -423.9512634277344,
698
+ "loss": 0.5283,
699
+ "rewards/accuracies": 0.7749999761581421,
700
+ "rewards/chosen": -0.8080742955207825,
701
+ "rewards/margins": 0.7384942770004272,
702
+ "rewards/rejected": -1.546568512916565,
703
+ "step": 410
704
+ },
705
+ {
706
+ "epoch": 0.8786610878661087,
707
+ "grad_norm": 30.22103066532477,
708
+ "learning_rate": 2.2111614344599684e-08,
709
+ "logits/chosen": -2.292484760284424,
710
+ "logits/rejected": -2.2869484424591064,
711
+ "logps/chosen": -380.738037109375,
712
+ "logps/rejected": -429.08856201171875,
713
+ "loss": 0.5085,
714
+ "rewards/accuracies": 0.6812499761581421,
715
+ "rewards/chosen": -0.8421257138252258,
716
+ "rewards/margins": 0.7031220197677612,
717
+ "rewards/rejected": -1.5452475547790527,
718
+ "step": 420
719
+ },
720
+ {
721
+ "epoch": 0.899581589958159,
722
+ "grad_norm": 25.901609680308766,
723
+ "learning_rate": 1.521597710086439e-08,
724
+ "logits/chosen": -2.2143397331237793,
725
+ "logits/rejected": -2.163553476333618,
726
+ "logps/chosen": -368.2716369628906,
727
+ "logps/rejected": -418.94598388671875,
728
+ "loss": 0.5103,
729
+ "rewards/accuracies": 0.762499988079071,
730
+ "rewards/chosen": -0.8807498216629028,
731
+ "rewards/margins": 0.741529643535614,
732
+ "rewards/rejected": -1.6222795248031616,
733
+ "step": 430
734
+ },
735
+ {
736
+ "epoch": 0.9205020920502092,
737
+ "grad_norm": 23.039233385517466,
738
+ "learning_rate": 9.57301420397924e-09,
739
+ "logits/chosen": -2.298792839050293,
740
+ "logits/rejected": -2.2591471672058105,
741
+ "logps/chosen": -360.7129821777344,
742
+ "logps/rejected": -415.3321838378906,
743
+ "loss": 0.5094,
744
+ "rewards/accuracies": 0.71875,
745
+ "rewards/chosen": -0.7534270286560059,
746
+ "rewards/margins": 0.7117618918418884,
747
+ "rewards/rejected": -1.4651888608932495,
748
+ "step": 440
749
+ },
750
+ {
751
+ "epoch": 0.9414225941422594,
752
+ "grad_norm": 23.551370802340685,
753
+ "learning_rate": 5.212833302556258e-09,
754
+ "logits/chosen": -2.2180867195129395,
755
+ "logits/rejected": -2.2026524543762207,
756
+ "logps/chosen": -377.8111877441406,
757
+ "logps/rejected": -461.9335021972656,
758
+ "loss": 0.5214,
759
+ "rewards/accuracies": 0.6812499761581421,
760
+ "rewards/chosen": -0.8429912328720093,
761
+ "rewards/margins": 0.6932216882705688,
762
+ "rewards/rejected": -1.5362128019332886,
763
+ "step": 450
764
+ },
765
+ {
766
+ "epoch": 0.9623430962343096,
767
+ "grad_norm": 34.306089949960864,
768
+ "learning_rate": 2.158697848236607e-09,
769
+ "logits/chosen": -2.280740261077881,
770
+ "logits/rejected": -2.252593517303467,
771
+ "logps/chosen": -353.6402893066406,
772
+ "logps/rejected": -386.175048828125,
773
+ "loss": 0.5251,
774
+ "rewards/accuracies": 0.737500011920929,
775
+ "rewards/chosen": -0.7945828437805176,
776
+ "rewards/margins": 0.657060444355011,
777
+ "rewards/rejected": -1.4516431093215942,
778
+ "step": 460
779
+ },
780
+ {
781
+ "epoch": 0.9832635983263598,
782
+ "grad_norm": 21.039289828071094,
783
+ "learning_rate": 4.269029751107489e-10,
784
+ "logits/chosen": -2.260293483734131,
785
+ "logits/rejected": -2.2311267852783203,
786
+ "logps/chosen": -354.1949768066406,
787
+ "logps/rejected": -419.3480529785156,
788
+ "loss": 0.503,
789
+ "rewards/accuracies": 0.71875,
790
+ "rewards/chosen": -0.8010648488998413,
791
+ "rewards/margins": 0.6369507312774658,
792
+ "rewards/rejected": -1.4380155801773071,
793
+ "step": 470
794
+ },
795
+ {
796
+ "epoch": 1.0,
797
+ "step": 478,
798
+ "total_flos": 0.0,
799
+ "train_loss": 0.5580766839462344,
800
+ "train_runtime": 18824.9899,
801
+ "train_samples_per_second": 3.247,
802
+ "train_steps_per_second": 0.025
803
+ }
804
+ ],
805
+ "logging_steps": 10,
806
+ "max_steps": 478,
807
+ "num_input_tokens_seen": 0,
808
+ "num_train_epochs": 1,
809
+ "save_steps": 100,
810
+ "stateful_callbacks": {
811
+ "TrainerControl": {
812
+ "args": {
813
+ "should_epoch_stop": false,
814
+ "should_evaluate": false,
815
+ "should_log": false,
816
+ "should_save": true,
817
+ "should_training_stop": true
818
+ },
819
+ "attributes": {}
820
+ }
821
+ },
822
+ "total_flos": 0.0,
823
+ "train_batch_size": 8,
824
+ "trial_name": null,
825
+ "trial_params": null
826
+ }