Benjaminpwh committed
Commit d4bdc84 · verified · 1 Parent(s): 332cce9

Moved checkpoint-3800 contents to root

config.json CHANGED
@@ -23,7 +23,7 @@
  "torch_dtype": "bfloat16",
  "transformers_version": "4.52.3",
  "unsloth_fixed": true,
- "unsloth_version": "2025.5.9",
+ "unsloth_version": "2025.5.8",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 152064
generation_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "max_length": 32768,
+ "pad_token_id": 151654,
+ "repetition_penalty": 1.05,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.52.3"
+ }
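
For context, the new generation_config.json pins the checkpoint's default sampling behaviour (do_sample with temperature 0.7, top_p 0.8, top_k 20, repetition_penalty 1.05, plus the Qwen2-style eos/pad token ids). A minimal sketch of how those defaults get picked up at load time; the repo id below is a placeholder, not the actual model path:

```python
# Sketch only; "Benjaminpwh/qwen-checkpoint" is a hypothetical repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Benjaminpwh/qwen-checkpoint"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# temperature/top_p/top_k/repetition_penalty and the eos/pad ids are read from
# the generation_config.json added in this commit; any of them can still be
# overridden per call, e.g. model.generate(..., temperature=0.2).
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```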
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26b5019a969a59f0fa2c4f2bf9e1e5d4026e088a3a89a84e64ca987a28312b58
+ oid sha256:120b4b4d9eaa684667c0f53a76de9acb9c0c7f4628f48553ee1dee9948d05ac0
  size 1089994880
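
The weight shards and the *.pt files in this commit are tracked with Git LFS, so the diff shows only the pointer file: a spec version, the sha256 oid of the payload, and its byte size. A small sketch (my own illustration, not part of the commit) of checking a downloaded shard against the oid above:

```python
# Verify a downloaded LFS object against the sha256 oid recorded in the pointer.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Expected value copied from the new pointer file above.
expected = "120b4b4d9eaa684667c0f53a76de9acb9c0c7f4628f48553ee1dee9948d05ac0"
print(sha256_of("model-00004-of-00004.safetensors") == expected)
```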
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b5bcfcff7703489ae56b56d1e5c6be85f56e45ac908c92f513931ab4113d6c0
+ size 2179992883
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61c19bab1174704a4a4441475683bf1270277af15d2e2c95e964789128e482c4
+ size 14645
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2bed431da5b8370427286ed62d7353c6726e9cac8820bbdac7a67d1eba00314
+ size 1465
tokenizer_config.json CHANGED
@@ -199,14 +199,10 @@
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
- "max_length": 1024,
  "model_max_length": 32768,
  "pad_token": "<|vision_pad|>",
  "padding_side": "right",
  "split_special_tokens": false,
- "stride": 0,
  "tokenizer_class": "Qwen2Tokenizer",
- "truncation_side": "right",
- "truncation_strategy": "longest_first",
  "unk_token": null
  }
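
The tokenizer change drops the call-time keys that had been serialized into tokenizer_config.json (max_length: 1024, stride: 0, truncation_side, truncation_strategy), leaving model_max_length at 32768. Under that reading, truncation is now chosen per call rather than inherited from the saved config; a minimal sketch, again with a placeholder repo id:

```python
# Sketch (assumption about intent, not stated in the commit): with the
# truncation keys gone from tokenizer_config.json, truncation behaviour is
# passed explicitly when the tokenizer is called.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Benjaminpwh/qwen-checkpoint")  # placeholder
enc = tok(
    "some long document ...",
    truncation=True,     # opt in per call
    max_length=1024,     # the value the old config had hard-coded
    return_tensors="pt",
)
print(enc["input_ids"].shape)
```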
trainer_state.json ADDED
@@ -0,0 +1,307 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.25460956377173916,
+ "eval_steps": 500,
+ "global_step": 3800,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 6.700251678203663e-05,
+ "grad_norm": 5.96875,
+ "learning_rate": 0.0002,
+ "loss": 2.3034,
+ "step": 1
+ },
+ {
+ "epoch": 0.0067002516782036625,
+ "grad_norm": 0.8203125,
+ "learning_rate": 0.00019867336683417085,
+ "loss": 1.0562,
+ "step": 100
+ },
+ {
+ "epoch": 0.013400503356407325,
+ "grad_norm": 0.7734375,
+ "learning_rate": 0.00019733333333333335,
+ "loss": 0.9729,
+ "step": 200
+ },
+ {
+ "epoch": 0.02010075503461099,
+ "grad_norm": 0.75,
+ "learning_rate": 0.00019599329983249582,
+ "loss": 0.9693,
+ "step": 300
+ },
+ {
+ "epoch": 0.02680100671281465,
+ "grad_norm": 0.78125,
+ "learning_rate": 0.00019465326633165831,
+ "loss": 0.9563,
+ "step": 400
+ },
+ {
+ "epoch": 0.033501258391018314,
+ "grad_norm": 0.72265625,
+ "learning_rate": 0.00019331323283082078,
+ "loss": 0.9593,
+ "step": 500
+ },
+ {
+ "epoch": 0.04020151006922198,
+ "grad_norm": 0.73828125,
+ "learning_rate": 0.00019197319932998325,
+ "loss": 0.9478,
+ "step": 600
+ },
+ {
+ "epoch": 0.046901761747425635,
+ "grad_norm": 0.703125,
+ "learning_rate": 0.00019063316582914575,
+ "loss": 0.9474,
+ "step": 700
+ },
+ {
+ "epoch": 0.0536020134256293,
+ "grad_norm": 0.7734375,
+ "learning_rate": 0.00018929313232830821,
+ "loss": 0.9438,
+ "step": 800
+ },
+ {
+ "epoch": 0.060302265103832964,
+ "grad_norm": 0.70703125,
+ "learning_rate": 0.0001879530988274707,
+ "loss": 0.9297,
+ "step": 900
+ },
+ {
+ "epoch": 0.06700251678203663,
+ "grad_norm": 0.73828125,
+ "learning_rate": 0.00018661306532663318,
+ "loss": 0.9297,
+ "step": 1000
+ },
+ {
+ "epoch": 0.07370276846024029,
+ "grad_norm": 0.69140625,
+ "learning_rate": 0.00018527303182579565,
+ "loss": 0.9218,
+ "step": 1100
+ },
+ {
+ "epoch": 0.08040302013844396,
+ "grad_norm": 0.73046875,
+ "learning_rate": 0.00018393299832495814,
+ "loss": 0.9129,
+ "step": 1200
+ },
+ {
+ "epoch": 0.0871032718166476,
+ "grad_norm": 0.70703125,
+ "learning_rate": 0.0001825929648241206,
+ "loss": 0.9156,
+ "step": 1300
+ },
+ {
+ "epoch": 0.09380352349485127,
+ "grad_norm": 0.71484375,
+ "learning_rate": 0.00018125293132328308,
+ "loss": 0.9151,
+ "step": 1400
+ },
+ {
+ "epoch": 0.10050377517305494,
+ "grad_norm": 1.0625,
+ "learning_rate": 0.00017991289782244557,
+ "loss": 0.9115,
+ "step": 1500
+ },
+ {
+ "epoch": 0.1072040268512586,
+ "grad_norm": 0.796875,
+ "learning_rate": 0.00017857286432160804,
+ "loss": 0.9114,
+ "step": 1600
+ },
+ {
+ "epoch": 0.11390427852946226,
+ "grad_norm": 0.6484375,
+ "learning_rate": 0.00017723283082077054,
+ "loss": 0.9064,
+ "step": 1700
+ },
+ {
+ "epoch": 0.12060453020766593,
+ "grad_norm": 0.69921875,
+ "learning_rate": 0.000175892797319933,
+ "loss": 0.9039,
+ "step": 1800
+ },
+ {
+ "epoch": 0.12730478188586958,
+ "grad_norm": 0.68359375,
+ "learning_rate": 0.00017455276381909548,
+ "loss": 0.8975,
+ "step": 1900
+ },
+ {
+ "epoch": 0.13400503356407326,
+ "grad_norm": 0.71484375,
+ "learning_rate": 0.00017321273031825794,
+ "loss": 0.8919,
+ "step": 2000
+ },
+ {
+ "epoch": 0.1407052852422769,
+ "grad_norm": 0.80078125,
+ "learning_rate": 0.00017187269681742044,
+ "loss": 0.8902,
+ "step": 2100
+ },
+ {
+ "epoch": 0.14740553692048058,
+ "grad_norm": 0.67578125,
+ "learning_rate": 0.00017053266331658293,
+ "loss": 0.8941,
+ "step": 2200
+ },
+ {
+ "epoch": 0.15410578859868423,
+ "grad_norm": 0.6875,
+ "learning_rate": 0.0001691926298157454,
+ "loss": 0.8928,
+ "step": 2300
+ },
+ {
+ "epoch": 0.1608060402768879,
+ "grad_norm": 0.7578125,
+ "learning_rate": 0.00016785259631490787,
+ "loss": 0.8889,
+ "step": 2400
+ },
+ {
+ "epoch": 0.16750629195509156,
+ "grad_norm": 0.69921875,
+ "learning_rate": 0.00016651256281407034,
+ "loss": 0.8825,
+ "step": 2500
+ },
+ {
+ "epoch": 0.1742065436332952,
+ "grad_norm": 0.703125,
+ "learning_rate": 0.00016517252931323284,
+ "loss": 0.8742,
+ "step": 2600
+ },
+ {
+ "epoch": 0.1809067953114989,
+ "grad_norm": 0.67578125,
+ "learning_rate": 0.00016383249581239533,
+ "loss": 0.878,
+ "step": 2700
+ },
+ {
+ "epoch": 0.18760704698970254,
+ "grad_norm": 0.66796875,
+ "learning_rate": 0.0001624924623115578,
+ "loss": 0.8679,
+ "step": 2800
+ },
+ {
+ "epoch": 0.19430729866790622,
+ "grad_norm": 0.6796875,
+ "learning_rate": 0.00016115242881072027,
+ "loss": 0.8618,
+ "step": 2900
+ },
+ {
+ "epoch": 0.20100755034610987,
+ "grad_norm": 0.67578125,
+ "learning_rate": 0.00015981239530988274,
+ "loss": 0.8695,
+ "step": 3000
+ },
+ {
+ "epoch": 0.20770780202431355,
+ "grad_norm": 0.62890625,
+ "learning_rate": 0.00015847236180904523,
+ "loss": 0.8769,
+ "step": 3100
+ },
+ {
+ "epoch": 0.2144080537025172,
+ "grad_norm": 0.6484375,
+ "learning_rate": 0.00015713232830820773,
+ "loss": 0.8665,
+ "step": 3200
+ },
+ {
+ "epoch": 0.22110830538072088,
+ "grad_norm": 0.7109375,
+ "learning_rate": 0.0001557922948073702,
+ "loss": 0.8572,
+ "step": 3300
+ },
+ {
+ "epoch": 0.22780855705892453,
+ "grad_norm": 0.65625,
+ "learning_rate": 0.00015445226130653266,
+ "loss": 0.8696,
+ "step": 3400
+ },
+ {
+ "epoch": 0.23450880873712818,
+ "grad_norm": 0.7578125,
+ "learning_rate": 0.00015311222780569513,
+ "loss": 0.8543,
+ "step": 3500
+ },
+ {
+ "epoch": 0.24120906041533186,
+ "grad_norm": 0.66015625,
+ "learning_rate": 0.00015177219430485763,
+ "loss": 0.8618,
+ "step": 3600
+ },
+ {
+ "epoch": 0.2479093120935355,
+ "grad_norm": 0.640625,
+ "learning_rate": 0.00015043216080402012,
+ "loss": 0.8553,
+ "step": 3700
+ },
+ {
+ "epoch": 0.25460956377173916,
+ "grad_norm": 0.640625,
+ "learning_rate": 0.0001490921273031826,
+ "loss": 0.8541,
+ "step": 3800
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 14925,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2.0541408970364707e+19,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
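
A few sanity checks on the new trainer_state.json: the checkpoint sits at global_step 3800 of max_steps 14925 (about a quarter of the single scheduled epoch), loss has fallen from 2.30 at step 1 to roughly 0.85, and the logged learning rates look consistent with a linear decay from the 2e-4 peak toward zero over the 14925 steps, with no visible warmup. A sketch of that inference (my reading of the log, not something stored in the checkpoint):

```python
# Sketch: reproduce the logged learning rates under an assumed linear-decay
# schedule (peak 2e-4, decaying to 0 over max_steps = 14925).
PEAK_LR = 2e-4
MAX_STEPS = 14925  # from trainer_state.json

def inferred_lr(step: int) -> float:
    """Linear-decay guess for the LR logged at a given optimizer step."""
    return PEAK_LR * (MAX_STEPS - (step - 1)) / MAX_STEPS

# Spot-check against a few values copied from log_history above.
logged = {1: 0.0002, 100: 0.00019867336683417085, 3800: 0.0001490921273031826}
for step, lr in logged.items():
    assert abs(inferred_lr(step) - lr) < 1e-9, (step, inferred_lr(step), lr)

# Epoch bookkeeping: 3800 steps * 6.700251678203663e-05 epoch/step matches the
# reported "epoch" field, i.e. about a quarter of one training epoch.
print(3800 * 6.700251678203663e-05)  # ~0.25460956
```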
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d940005b5c994908f56007a770bd82c2946bbe8f6c6171913a46ea751f4d0b3
+ oid sha256:6d4374b07c1f986c7320718de1ddb6856a2d2ff7749e3e9b05e692ab2de670d7
  size 6097