loubnabnl (HF Staff) committed · verified
Commit c8284ff · 1 Parent(s): 86fb3f2

Create stage2_8T_9T.yaml

Files changed (1):
  1. stage2_8T_9T.yaml +434 -0

stage2_8T_9T.yaml ADDED
@@ -0,0 +1,434 @@
+ checkpoints:
+   checkpoint_interval: 2000
+   checkpoints_path: /scratch/loubna-checkpoints-stage2
+   checkpoints_path_is_shared_file_system: false
+   load_lr_scheduler: true
+   load_optimizer: true
+   resume_checkpoint_path: s3://smollm3/tp-fix-final-pre-training/1205-48n-part1-fix/3450000
+   save_final_state: true
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset:
+       dataset_read_path:
+       - /scratch/smollm3-data-part1/fineweb-edu
+       - /scratch/smollm3-data-part1/dclm
+       - /scratch/smollm3-data-part1/pes2o
+       - /scratch/smollm3-data-part1/wiki
+       - /scratch/smollm3-data-part1/stackexchange
+       - /scratch/smollm3-data-part1/fw2-fra
+       - /scratch/smollm3-data-part1/fw2-spa
+       - /scratch/smollm3-data-part1/fw2-deu
+       - /scratch/smollm3-data-part1/fw2-ita
+       - /scratch/smollm3-data-part1/fw2-por
+       - /scratch/smollm3-data-part1/fw2-cmn
+       - /scratch/smollm3-data-part1/fw2-rus
+       - /scratch/smollm3-data-part1/fw2-fas
+       - /scratch/smollm3-data-part1/fw2-jpn
+       - /scratch/smollm3-data-part1/fw2-kor
+       - /scratch/smollm3-data-part1/fw2-hin
+       - /scratch/smollm3-data-part1/fw2-tha
+       - /scratch/smollm3-data-part1/fw2-vie
+       - /scratch/smollm3-data-part1/fw2-ell
+       - /scratch/smollm3-data-part1/infiwebmath
+       - /scratch/smollm3-data-part1/finemath
+       - /scratch/smollm3-data-part1/stack-edu-Python
+       - /scratch/smollm3-data-part1/stack-edu-Java
+       - /scratch/smollm3-data-part1/stack-edu-JavaScript
+       - /scratch/smollm3-data-part1/stack-edu-C
+       - /scratch/smollm3-data-part1/stack-edu-Cpp
+       - /scratch/smollm3-data-part1/stack-edu-C-Sharp
+       - /scratch/smollm3-data-part1/stack-edu-PHP
+       - /scratch/smollm3-data-part1/stack-edu-TypeScript
+       - /scratch/smollm3-data-part1/stack-edu-Swift
+       - /scratch/smollm3-data-part1/stack-edu-SQL
+       - /scratch/smollm3-data-part1/stack-edu-Ruby
+       - /scratch/smollm3-data-part1/stack-edu-Markdown
+       - /scratch/smollm3-data-part1/stack-edu-HTML
+       - /scratch/smollm3-data-part1/stack-edu-Rust
+       - /scratch/smollm3-data-part1/stack-edu-Go
+       - /scratch/smollm3-data-part1/stack-edu-Shell
+       - /scratch/smollm3-data-part1/pull-requests
+       - /scratch/smollm3-data-part1/kaggle
+       - /scratch/smollm3-data-part1/jupyter-scripts
+       - /scratch/smollm3-data-part1/github-issues
+       dataset_folder:
+       - s3://smollm3/datasets/llama_tokenized-global-chunks/fineweb-edu/fineweb-edu/
+       - s3://smollm3/datasets/llama_tokenized-global-chunks/dclm/dclm/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/pes2o/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/wiki/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stackexchange/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-fra/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-spa/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-deu/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-ita/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-por/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-cmn/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-rus/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-fas/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-jpn/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-kor/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-hin/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-tha/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-vie/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-ell/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/infiwebmath/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/finemath/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Python/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Java/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-JavaScript/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-C/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Cpp/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-C-Sharp/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-PHP/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-TypeScript/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Swift/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-SQL/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Ruby/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Markdown/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-HTML/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Rust/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Go/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Shell/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/pull-requests/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/kaggle/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/jupyter-scripts/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/github-issues/
+       dataset_weights:
+       - 0.333
+       - 0.37
+       - 0.02
+       - 0.001
+       - 0.004
+       - 0.016
+       - 0.02
+       - 0.022
+       - 0.0105
+       - 0.01
+       - 0.01
+       - 0.01
+       - 0.003
+       - 0.00325
+       - 0.00325
+       - 0.00325
+       - 0.00325
+       - 0.00325
+       - 0.00225
+       - 0.01
+       - 0.017
+       - 0.025
+       - 0.013
+       - 0.013
+       - 0.007
+       - 0.018
+       - 0.006
+       - 0.006
+       - 0.003
+       - 0.001
+       - 0.004
+       - 0.0008
+       - 0.005
+       - 0.006
+       - 0.0008
+       - 0.0005
+       - 0.0007
+       - 0.006
+       - 0.0005
+       - 0.0055
+       - 0.0032
+       pad_samples_to_global_batch_size: false
+       return_positions: true
+       token_size_in_bytes: 4
+       tokenizer_name: meta-llama/Llama-3.2-1B
+       use_old_brrr_dataloader: false
+       vocab_size: 128256
+     num_loading_workers: 0
+     seed: 6
+   name: stable
+   start_training_step: 1
+ - data:
+     dataset:
+       dataset_read_path: # replace some programming languages with stack-edu-real, add finemath4+ and infiwebmath4+ and MegaMath, downsample some sources like SE
+       - /scratch/smollm3-data-part1/fineweb-edu
+       - /scratch/smollm3-data-part1/dclm
+       - /scratch/smollm3-data-part1/pes2o
+       - /scratch/smollm3-data-part1/wiki
+       - /scratch/smollm3-data-part1/stackexchange
+       - /scratch/smollm3-data-part1/fw2-fra
+       - /scratch/smollm3-data-part1/fw2-spa
+       - /scratch/smollm3-data-part1/fw2-deu
+       - /scratch/smollm3-data-part1/fw2-ita
+       - /scratch/smollm3-data-part1/fw2-por
+       - /scratch/smollm3-data-part1/fw2-cmn
+       - /scratch/smollm3-data-part1/fw2-rus
+       - /scratch/smollm3-data-part1/fw2-fas
+       - /scratch/smollm3-data-part1/fw2-jpn
+       - /scratch/smollm3-data-part1/fw2-kor
+       - /scratch/smollm3-data-part1/fw2-hin
+       - /scratch/smollm3-data-part1/fw2-tha
+       - /scratch/smollm3-data-part1/fw2-vie
+       - /scratch/smollm3-data-part1/fw2-ell
+       - /scratch/smollm3-data-part1/infiwebmath
+       - /scratch/smollm3-data-part1/finemath
+       - /scratch/smollm3-data-part1/infiwebmath-4plus
+       - /scratch/smollm3-data-part1/finemath-4plus
+       - /scratch/smollm3-data-part1/megamath-web-pro
+       - /scratch/smollm3-data-part1/megamath-qa-qwen
+       - /scratch/smollm3-data-part1/megamath-text-code-block
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-Python
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-Java
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-JavaScript
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-C
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-Cpp
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-C-Sharp
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-PHP
+       - /scratch/smollm3-data-part1/stack-edu-TypeScript
+       - /scratch/smollm3-data-part1/stack-edu-Swift
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-SQL
+       - /scratch/smollm3-data-part1/stack-edu-Ruby
+       - /scratch/smollm3-data-part1/stack-edu-real-shuffled-Markdown
+       - /scratch/smollm3-data-part1/stack-edu-HTML
+       - /scratch/smollm3-data-part1/stack-edu-Rust
+       - /scratch/smollm3-data-part1/stack-edu-Go
+       - /scratch/smollm3-data-part1/stack-edu-Shell
+       - /scratch/smollm3-data-part1/pull-requests
+       - /scratch/smollm3-data-part1/kaggle
+       - /scratch/smollm3-data-part1/jupyter-scripts
+       - /scratch/smollm3-data-part1/github-issues
+       dataset_folder:
+       - s3://smollm3/datasets/llama_tokenized-global-chunks/fineweb-edu/fineweb-edu/
+       - s3://smollm3/datasets/llama_tokenized-global-chunks/dclm/dclm/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/pes2o/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/wiki/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stackexchange/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-fra/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-spa/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-deu/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-ita/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-por/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-cmn/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-rus/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-fas/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-jpn/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-kor/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-hin/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-tha/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-vie/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-ell/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/infiwebmath/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/finemath/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/infiwebmath-4plus/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/finemath-4plus/
+       - s3://smollm3/datasets/llama_tokenized_4097/megamath-web-pro/standard/
+       - s3://smollm3/datasets/llama_tokenized_4097/megamath-qa-qwen/standard/
+       - s3://smollm3/datasets/llama_tokenized_4097/megamath-text-code-block/tokenized/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-Python/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-Java/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-JavaScript/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-C/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-Cpp/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-C-Sharp/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-PHP/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-TypeScript/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Swift/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-SQL/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Ruby/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-real-shuffled-Markdown/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-HTML/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Rust/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Go/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Shell/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/pull-requests/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/kaggle/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/jupyter-scripts/
+       - s3://smollm3/datasets/llama_tokenized-individual-chunks/github-issues/
+       dataset_weights:
+       - 0.30
+       - 0.33
+       - 0.016
+       - 0.001
+       - 0.002
+       - 0.016
+       - 0.02
+       - 0.0232
+       - 0.0105
+       - 0.01
+       - 0.01
+       - 0.01
+       - 0.002
+       - 0.00325
+       - 0.00325
+       - 0.00325
+       - 0.00325
+       - 0.00005 # downsample viet, too many epochs
+       - 0.00225
+       - 0.01 # update infiwebmath3+
+       - 0.01 # update finemath3+
+       - 0.01 # add infiwebmath4+
+       - 0.02 # add finemath4+
+       - 0.02 # add MegamathWeb-Pro
+       - 0.0008 # MegaMathQA-Qwen
+       - 0.02 # add MegaMath-Text-Code
+       - 0.025 # add stack-edu-real-shuffled-Python
+       - 0.01725 # Java
+       - 0.01625 # JS
+       - 0.007 # C
+       - 0.018 # add stack-edu-real-shuffled-Cpp
+       - 0.006 # C#
+       - 0.004 # PHP
+       - 0.003 # TS
+       - 0.001 # Swift
+       - 0.006 # SQL
+       - 0.0002 # Ruby
+       - 0.00611 # Md
+       - 0.00614 # HTML
+       - 0.0008 # Rust
+       - 0.0005 # Go
+       - 0.0001 # Shell
+       - 0.0114 # PRs
+       - 0.0005 # kaggle
+       - 0.01 # notebooks
+       - 0.004 # issues
+       pad_samples_to_global_batch_size: false
+       return_positions: true
+       token_size_in_bytes: 4
+       tokenizer_name: meta-llama/Llama-3.2-1B
+       use_old_brrr_dataloader: false
+       vocab_size: 128256
+     num_loading_workers: 0
+     seed: 6
+   name: stable stage 2
+   start_training_step: 3450001
+ general:
+   benchmark_csv_path: null
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: smollm3-3B-final
+   run: elie-48n-2105-stage2
+   seed: 6
+   step: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 50
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     _attn_implementation: flash_attention_2
+     _fused_rms_norm: true
+     _fused_rotary_emb: true
+     _use_doc_masking: true
+     _use_qkv_packed: true
+     attention_bias: false
+     bos_token_id: 128000
+     eos_token_id: 128001
+     flex_attention_mask: null
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 11008
+     is_qwen2_config: true
+     max_position_embeddings: 4096
+     moe_config: null
+     num_attention_heads: 16
+     num_hidden_layers: 36
+     num_key_value_heads: 4
+     pad_token_id: null
+     pretraining_tp: 2
+     rms_norm_eps: 1.0e-06
+     rope_interleaved: false
+     rope_scaling: null
+     rope_theta: 50000.0
+     sliding_window_size: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 128256
+     z_loss_coefficient: 1.0e-05
+     z_loss_enabled: false
+     no_rope_layer: 4
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0002
+     lr_decay_starting_step: 4000000
+     lr_decay_steps: 0
+     lr_decay_style: linear
+     lr_warmup_steps: 2000
+     lr_warmup_style: linear
+     min_decay_lr: 0
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.1
+   weight_decay_exclude_named_params:
+   - .*token_embedding.*
+   zero_stage: 0
+ parallelism:
+   context_parallel_size: 1
+   dp: 192
+   expert_parallel_size: 1
+   moe_layer_recompute: false
+   pp: 1
+   pp_engine: 1f1b
+   recompute_layer: false
+   tp: 2
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+   tp_recompute_allgather: true
+ profiler: null
+ s3_upload:
+   remove_after_upload: true
+   s5cmd_concurrency: 5
+   s5cmd_numworkers: 16
+   s5cmd_path: /fsx/loubna/.venv-2-6-cu124/bin/s5cmd
+   upload_s3_path: s3://smollm3/tp-fix-final-pre-training/elie-2105-stage2
+ tokenizer:
+   tokenizer_max_length: 4096
+   tokenizer_name_or_path: meta-llama/Llama-3.2-1B
+   tokenizer_revision: null
+ metrics_logging:
+   log_level: 1
+   log_detail_interval: 200
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 3
+   sequence_length: 4096
+   train_steps: 4000000
+   val_check_interval: 100
+ lighteval:
+   slurm_script_dir: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_results/launch-config" # Default path for launch scripts
+   logs_path: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_results/logs" # Default path for evaluation logs
+   local_checkpoint_dir: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/evals-ckpt" # Default path for temporary checkpoint storage. Will store under {local_checkpoint_dir}/{run_name}/{step}
+   nanotron_path: "/fsx/loubna/projects_v2/smollm3/nanotron"
+   output_dir: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_results/results"
+   s3_save_path: "s3://smollm3/eval-prod"
+   upload_to_wandb: true
+   wandb_project: smollm3-3B-evals
+   wandb_entity: huggingface
+   parallelism:
+     dp: 4
+     pp: 1
+     tp: 2
+     tp_linear_async_communication: true
+   batch_size: 8 # Optional batch size for evaluation
+   eval_config_override: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_configs/smollm3_eval.yaml" # The evaluation config file to use
+   eval_interval: 4000
+   eval_interval_file: null
+   slurm:
+     gpus_per_node: 8
+     partition: "hopper-prod"
+     hf_cache: "/fsx/loubna/.cache/huggingface"
+     cpus_per_task: 88
+     qos: "normal"
+     time: "01:59:00"
+     reservation: smollm # Optional reservation name, can be null
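
Each data stage above pairs a dataset_read_path list with a dataset_folder list and a dataset_weights list, so the three lists must stay the same length, and in this file each stage's weights sum to approximately 1. A minimal consistency check is sketched below; it assumes only a local copy of stage2_8T_9T.yaml and PyYAML, and it checks structure only, not how nanotron itself consumes these fields.

# check_stage2_config.py -- structural sanity check for stage2_8T_9T.yaml (illustrative only)
import yaml

with open("stage2_8T_9T.yaml") as f:
    config = yaml.safe_load(f)

for stage in config["data_stages"]:
    dataset = stage["data"]["dataset"]
    paths = dataset["dataset_read_path"]
    folders = dataset["dataset_folder"]
    weights = dataset["dataset_weights"]

    # Every source needs a scratch read path, an S3 folder, and a sampling weight.
    assert len(paths) == len(folders) == len(weights), f"length mismatch in {stage['name']}"

    print(f"{stage['name']}: {len(weights)} sources, "
          f"weights sum to {sum(weights):.4f}, starts at step {stage['start_training_step']}")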