silviasapora committed
Commit 1b98ff8 · verified · 1 Parent(s): 0fa4c92

Model save

Files changed (4)
  1. README.md +66 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +282 -0
README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ library_name: transformers
+ model_name: gemma-7b-sft-silvia_simpo-basic-5e-7-005-v144
+ tags:
+ - generated_from_trainer
+ - trl
+ - orpo
+ licence: license
+ ---
+
+ # Model Card for gemma-7b-sft-silvia_simpo-basic-5e-7-005-v144
+
+ This model is a fine-tuned version of [None](https://huggingface.co/None).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ # chat-style text-generation pipeline for this checkpoint (requires a CUDA device)
+ generator = pipeline("text-generation", model="silviasapora/gemma-7b-sft-silvia_simpo-basic-5e-7-005-v144", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
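+
+ For more control over decoding, the checkpoint can also be loaded directly with `AutoModelForCausalLM`. A minimal sketch, assuming the tokenizer ships a chat template:
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "silviasapora/gemma-7b-sft-silvia_simpo-basic-5e-7-005-v144"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
+
+ # render the chat turns into the model's prompt format
+ messages = [{"role": "user", "content": "Would you travel to the past or the future, and why?"}]
+ input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
+ output_ids = model.generate(input_ids, max_new_tokens=128)
+ # decode only the newly generated tokens
+ print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
+ ```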
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/silvias/huggingface/runs/detfchne)
+
+ This model was trained with ORPO, a method introduced in [ORPO: Monolithic Preference Optimization without Reference Model](https://huggingface.co/papers/2403.07691).
+
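+ The training script itself is not part of this commit. The following is a minimal sketch of an equivalent TRL ORPO run; the base model and dataset names are placeholders, and `beta=0.05` is an assumption read off the `005` suffix in the model name:
+
+ ```python
+ from datasets import load_dataset
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from trl import ORPOConfig, ORPOTrainer
+
+ base_model = "google/gemma-7b"  # placeholder: the base model is not recorded in this card
+ model = AutoModelForCausalLM.from_pretrained(base_model)
+ tokenizer = AutoTokenizer.from_pretrained(base_model)
+
+ # placeholder preference dataset with prompt/chosen/rejected columns
+ dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
+
+ config = ORPOConfig(
+     output_dir="gemma-7b-orpo",
+     learning_rate=5e-7,  # matches the 5e-7 in the model name and the logged LR schedule
+     beta=0.05,           # assumption, from the 005 suffix
+     num_train_epochs=1,  # matches num_train_epochs in trainer_state.json
+     logging_steps=5,     # matches logging_steps in trainer_state.json
+ )
+ trainer = ORPOTrainer(model=model, args=config, train_dataset=dataset, processing_class=tokenizer)
+ trainer.train()
+ ```
+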
+ ### Framework versions
+
+ - TRL: 0.15.2
+ - Transformers: 4.49.0
+ - PyTorch: 2.5.1
+ - Datasets: 3.1.0
+ - Tokenizers: 0.21.1
+
+ ## Citations
+
+ Cite ORPO as:
+
+ ```bibtex
+ @article{hong2024orpo,
+     title        = {{ORPO: Monolithic Preference Optimization without Reference Model}},
+     author       = {Jiwoo Hong and Noah Lee and James Thorne},
+     year         = 2024,
+     eprint       = {arXiv:2403.07691}
+ }
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title        = {{TRL: Transformer Reinforcement Learning}},
+     author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year         = 2020,
+     journal      = {GitHub repository},
+     publisher    = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9885148514851485,
+   "total_flos": 0.0,
+   "train_loss": 4.754565581297263,
+   "train_runtime": 2713.8691,
+   "train_samples": 5050,
+   "train_samples_per_second": 1.861,
+   "train_steps_per_second": 0.029
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9885148514851485,
+   "total_flos": 0.0,
+   "train_loss": 4.754565581297263,
+   "train_runtime": 2713.8691,
+   "train_samples": 5050,
+   "train_samples_per_second": 1.861,
+   "train_steps_per_second": 0.029
+ }
trainer_state.json ADDED
@@ -0,0 +1,282 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9885148514851485,
+   "eval_steps": 500,
+   "global_step": 78,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.06336633663366337,
+       "grad_norm": 12.8125,
+       "learning_rate": 3.1249999999999997e-07,
+       "log_odds_chosen": 0.9911565780639648,
+       "log_odds_ratio": -0.41253289580345154,
+       "logps/chosen": -0.6660582423210144,
+       "logps/rejected": -1.228592872619629,
+       "loss": 4.3637,
+       "nll_loss": 0.9886282086372375,
+       "rewards/accuracies": 0.828125,
+       "rewards/chosen": -0.0333029143512249,
+       "rewards/margins": 0.028126735240221024,
+       "rewards/rejected": -0.061429642140865326,
+       "step": 5
+     },
+     {
+       "epoch": 0.12673267326732673,
+       "grad_norm": 17.875,
+       "learning_rate": 4.989935734988097e-07,
+       "log_odds_chosen": 1.1546833515167236,
+       "log_odds_ratio": -0.37236329913139343,
+       "logps/chosen": -0.5902147889137268,
+       "logps/rejected": -1.2310830354690552,
+       "loss": 4.5589,
+       "nll_loss": 0.8954709768295288,
+       "rewards/accuracies": 0.846875011920929,
+       "rewards/chosen": -0.02951074205338955,
+       "rewards/margins": 0.03204340860247612,
+       "rewards/rejected": -0.06155414506793022,
+       "step": 10
+     },
+     {
+       "epoch": 0.1900990099009901,
+       "grad_norm": 15.0,
+       "learning_rate": 4.877641290737883e-07,
+       "log_odds_chosen": 1.1432024240493774,
+       "log_odds_ratio": -0.3701472878456116,
+       "logps/chosen": -0.5654497146606445,
+       "logps/rejected": -1.1885969638824463,
+       "loss": 4.5936,
+       "nll_loss": 0.875917911529541,
+       "rewards/accuracies": 0.8843749761581421,
+       "rewards/chosen": -0.028272485360503197,
+       "rewards/margins": 0.03115735575556755,
+       "rewards/rejected": -0.0594298429787159,
+       "step": 15
+     },
+     {
+       "epoch": 0.25346534653465347,
+       "grad_norm": 15.1875,
+       "learning_rate": 4.646121984004665e-07,
+       "log_odds_chosen": 1.0618789196014404,
+       "log_odds_ratio": -0.38934561610221863,
+       "logps/chosen": -0.6174649000167847,
+       "logps/rejected": -1.2051403522491455,
+       "loss": 4.4157,
+       "nll_loss": 0.937028706073761,
+       "rewards/accuracies": 0.856249988079071,
+       "rewards/chosen": -0.030873248353600502,
+       "rewards/margins": 0.029383767396211624,
+       "rewards/rejected": -0.06025701016187668,
+       "step": 20
+     },
+     {
+       "epoch": 0.31683168316831684,
+       "grad_norm": 16.5,
+       "learning_rate": 4.3069871595684787e-07,
+       "log_odds_chosen": 1.2300812005996704,
+       "log_odds_ratio": -0.3776150345802307,
+       "logps/chosen": -0.6107009649276733,
+       "logps/rejected": -1.3069515228271484,
+       "loss": 4.4461,
+       "nll_loss": 0.8909252882003784,
+       "rewards/accuracies": 0.856249988079071,
+       "rewards/chosen": -0.030535047873854637,
+       "rewards/margins": 0.03481253236532211,
+       "rewards/rejected": -0.0653475821018219,
+       "step": 25
+     },
+     {
+       "epoch": 0.3801980198019802,
+       "grad_norm": 16.25,
+       "learning_rate": 3.877242453630256e-07,
+       "log_odds_chosen": 1.3746024370193481,
+       "log_odds_ratio": -0.3285817801952362,
+       "logps/chosen": -0.5757168531417847,
+       "logps/rejected": -1.3395036458969116,
+       "loss": 4.232,
+       "nll_loss": 0.8684650659561157,
+       "rewards/accuracies": 0.890625,
+       "rewards/chosen": -0.02878584899008274,
+       "rewards/margins": 0.03818933293223381,
+       "rewards/rejected": -0.0669751763343811,
+       "step": 30
+     },
+     {
+       "epoch": 0.44356435643564357,
+       "grad_norm": 17.75,
+       "learning_rate": 3.378437060203357e-07,
+       "log_odds_chosen": 1.5565850734710693,
+       "log_odds_ratio": -0.28478947281837463,
+       "logps/chosen": -0.5244386196136475,
+       "logps/rejected": -1.3875908851623535,
+       "loss": 4.7516,
+       "nll_loss": 0.796803891658783,
+       "rewards/accuracies": 0.934374988079071,
+       "rewards/chosen": -0.026221930980682373,
+       "rewards/margins": 0.04315761476755142,
+       "rewards/rejected": -0.0693795457482338,
+       "step": 35
+     },
+     {
+       "epoch": 0.5069306930693069,
+       "grad_norm": 21.625,
+       "learning_rate": 2.8355831645441387e-07,
+       "log_odds_chosen": 1.7404935359954834,
+       "log_odds_ratio": -0.24158628284931183,
+       "logps/chosen": -0.4815496802330017,
+       "logps/rejected": -1.4325745105743408,
+       "loss": 5.0552,
+       "nll_loss": 0.7471240758895874,
+       "rewards/accuracies": 0.9375,
+       "rewards/chosen": -0.024077486246824265,
+       "rewards/margins": 0.047551244497299194,
+       "rewards/rejected": -0.07162873446941376,
+       "step": 40
+     },
+     {
+       "epoch": 0.5702970297029702,
+       "grad_norm": 20.125,
+       "learning_rate": 2.2759017277414164e-07,
+       "log_odds_chosen": 1.6410064697265625,
+       "log_odds_ratio": -0.26414045691490173,
+       "logps/chosen": -0.5090475678443909,
+       "logps/rejected": -1.4282280206680298,
+       "loss": 4.6016,
+       "nll_loss": 0.7733598947525024,
+       "rewards/accuracies": 0.9468749761581421,
+       "rewards/chosen": -0.025452375411987305,
+       "rewards/margins": 0.04595901817083359,
+       "rewards/rejected": -0.07141139358282089,
+       "step": 45
+     },
+     {
+       "epoch": 0.6336633663366337,
+       "grad_norm": 26.5,
+       "learning_rate": 1.7274575140626315e-07,
+       "log_odds_chosen": 1.7832257747650146,
+       "log_odds_ratio": -0.23228943347930908,
+       "logps/chosen": -0.46333685517311096,
+       "logps/rejected": -1.3944836854934692,
+       "loss": 5.7573,
+       "nll_loss": 0.709271252155304,
+       "rewards/accuracies": 0.953125,
+       "rewards/chosen": -0.023166844621300697,
+       "rewards/margins": 0.046557340770959854,
+       "rewards/rejected": -0.0697241872549057,
+       "step": 50
+     },
+     {
+       "epoch": 0.697029702970297,
+       "grad_norm": 24.125,
+       "learning_rate": 1.2177518064852348e-07,
+       "log_odds_chosen": 1.5008140802383423,
+       "log_odds_ratio": -0.30259355902671814,
+       "logps/chosen": -0.5552427172660828,
+       "logps/rejected": -1.3832123279571533,
+       "loss": 5.0102,
+       "nll_loss": 0.7843067646026611,
+       "rewards/accuracies": 0.9125000238418579,
+       "rewards/chosen": -0.027762139216065407,
+       "rewards/margins": 0.041398484259843826,
+       "rewards/rejected": -0.06916062533855438,
+       "step": 55
+     },
+     {
+       "epoch": 0.7603960396039604,
+       "grad_norm": 26.25,
+       "learning_rate": 7.723433775328384e-08,
+       "log_odds_chosen": 1.6810848712921143,
+       "log_odds_ratio": -0.2639027237892151,
+       "logps/chosen": -0.5468909740447998,
+       "logps/rejected": -1.5102508068084717,
+       "loss": 5.0576,
+       "nll_loss": 0.787597119808197,
+       "rewards/accuracies": 0.9375,
+       "rewards/chosen": -0.02734454534947872,
+       "rewards/margins": 0.04816799610853195,
+       "rewards/rejected": -0.07551254332065582,
+       "step": 60
+     },
+     {
+       "epoch": 0.8237623762376237,
+       "grad_norm": 26.25,
+       "learning_rate": 4.1356686569674335e-08,
+       "log_odds_chosen": 1.6594898700714111,
+       "log_odds_ratio": -0.2648767828941345,
+       "logps/chosen": -0.5453223586082458,
+       "logps/rejected": -1.4675935506820679,
+       "loss": 5.2911,
+       "nll_loss": 0.8060859441757202,
+       "rewards/accuracies": 0.9312499761581421,
+       "rewards/chosen": -0.0272661205381155,
+       "rewards/margins": 0.04611356183886528,
+       "rewards/rejected": -0.07337968051433563,
+       "step": 65
+     },
+     {
+       "epoch": 0.8871287128712871,
+       "grad_norm": 22.25,
+       "learning_rate": 1.5941282340065697e-08,
+       "log_odds_chosen": 1.8507673740386963,
+       "log_odds_ratio": -0.22900144755840302,
+       "logps/chosen": -0.500530481338501,
+       "logps/rejected": -1.5389080047607422,
+       "loss": 4.521,
+       "nll_loss": 0.7640606164932251,
+       "rewards/accuracies": 0.956250011920929,
+       "rewards/chosen": -0.02502652443945408,
+       "rewards/margins": 0.0519188828766346,
+       "rewards/rejected": -0.07694540917873383,
+       "step": 70
+     },
+     {
+       "epoch": 0.9504950495049505,
+       "grad_norm": 30.375,
+       "learning_rate": 2.2625595580163247e-09,
+       "log_odds_chosen": 1.575315237045288,
+       "log_odds_ratio": -0.2773153781890869,
+       "logps/chosen": -0.558388352394104,
+       "logps/rejected": -1.4570810794830322,
+       "loss": 4.7645,
+       "nll_loss": 0.8240699768066406,
+       "rewards/accuracies": 0.9375,
+       "rewards/chosen": -0.02791941724717617,
+       "rewards/margins": 0.04493463411927223,
+       "rewards/rejected": -0.07285405695438385,
+       "step": 75
+     },
+     {
+       "epoch": 0.9885148514851485,
+       "step": 78,
+       "total_flos": 0.0,
+       "train_loss": 4.754565581297263,
+       "train_runtime": 2713.8691,
+       "train_samples_per_second": 1.861,
+       "train_steps_per_second": 0.029
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 78,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 300,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
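
The `log_history` array in trainer_state.json is the easiest way to inspect this run programmatically. A minimal sketch, assuming the file has been downloaded to the working directory:

```python
import json

# load the state file written by transformers' Trainer
with open("trainer_state.json") as f:
    state = json.load(f)

# print per-log-step training loss and preference accuracy
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}: loss={entry['loss']:.4f}, "
              f"accuracy={entry.get('rewards/accuracies', float('nan')):.3f}")
```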