loubnabnl (HF Staff) committed (verified)
Commit: 86fb3f2
Parent: 8421991

Create stage1_8T.yaml

Files changed (1):
stage1_8T.yaml +281 -0
stage1_8T.yaml ADDED
@@ -0,0 +1,281 @@
checkpoints:
  checkpoint_interval: 2000
  checkpoints_path: /scratch/elie/checkpoints
  checkpoints_path_is_shared_file_system: false
  load_lr_scheduler: true
  load_optimizer: true
  resume_checkpoint_path: s3://smollm3/tp-fix-final-pre-training/1704-48n-part1
  save_final_state: true
  save_initial_state: false
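# Note: resume_checkpoint_path matches s3_upload.upload_s3_path further down,
# so the job resumes from the same S3 prefix it uploads its own checkpoints to.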
data_stages:
- data:
    dataset:
      dataset_read_path:
      - /scratch/smollm3-data-part1/fineweb-edu
      - /scratch/smollm3-data-part1/dclm
      - /scratch/smollm3-data-part1/pes2o
      - /scratch/smollm3-data-part1/wiki
      - /scratch/smollm3-data-part1/stackexchange
      - /scratch/smollm3-data-part1/fw2-fra
      - /scratch/smollm3-data-part1/fw2-spa
      - /scratch/smollm3-data-part1/fw2-deu
      - /scratch/smollm3-data-part1/fw2-ita
      - /scratch/smollm3-data-part1/fw2-por
      - /scratch/smollm3-data-part1/fw2-cmn
      - /scratch/smollm3-data-part1/fw2-rus
      - /scratch/smollm3-data-part1/fw2-fas
      - /scratch/smollm3-data-part1/fw2-jpn
      - /scratch/smollm3-data-part1/fw2-kor
      - /scratch/smollm3-data-part1/fw2-hin
      - /scratch/smollm3-data-part1/fw2-tha
      - /scratch/smollm3-data-part1/fw2-vie
      - /scratch/smollm3-data-part1/fw2-ell
      - /scratch/smollm3-data-part1/infiwebmath
      - /scratch/smollm3-data-part1/finemath
      - /scratch/smollm3-data-part1/stack-edu-Python
      - /scratch/smollm3-data-part1/stack-edu-Java
      - /scratch/smollm3-data-part1/stack-edu-JavaScript
      - /scratch/smollm3-data-part1/stack-edu-C
      - /scratch/smollm3-data-part1/stack-edu-Cpp
      - /scratch/smollm3-data-part1/stack-edu-C-Sharp
      - /scratch/smollm3-data-part1/stack-edu-PHP
      - /scratch/smollm3-data-part1/stack-edu-TypeScript
      - /scratch/smollm3-data-part1/stack-edu-Swift
      - /scratch/smollm3-data-part1/stack-edu-SQL
      - /scratch/smollm3-data-part1/stack-edu-Ruby
      - /scratch/smollm3-data-part1/stack-edu-Markdown
      - /scratch/smollm3-data-part1/stack-edu-HTML
      - /scratch/smollm3-data-part1/stack-edu-Rust
      - /scratch/smollm3-data-part1/stack-edu-Go
      - /scratch/smollm3-data-part1/stack-edu-Shell
      - /scratch/smollm3-data-part1/pull-requests
      - /scratch/smollm3-data-part1/kaggle
      - /scratch/smollm3-data-part1/jupyter-scripts
      - /scratch/smollm3-data-part1/github-issues
      dataset_folder:
      - s3://smollm3/datasets/llama_tokenized-global-chunks/fineweb-edu/fineweb-edu/
      - s3://smollm3/datasets/llama_tokenized-global-chunks/dclm/dclm/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/pes2o/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/wiki/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stackexchange/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-fra/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-spa/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-deu/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-ita/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-por/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-cmn/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-rus/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-fas/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-jpn/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-kor/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-hin/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-tha/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-vie/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/fw2-ell/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/infiwebmath/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/finemath/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Python/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Java/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-JavaScript/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-C/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Cpp/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-C-Sharp/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-PHP/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-TypeScript/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Swift/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-SQL/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Ruby/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Markdown/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-HTML/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Rust/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Go/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Shell/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/pull-requests/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/kaggle/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/jupyter-scripts/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/github-issues/
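      # dataset_folder lists the tokenized shards on S3; dataset_read_path above
      # appears to be the local /scratch mirror the job actually reads from.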
      dataset_weights:
      - 0.333
      - 0.37
      - 0.02
      - 0.001
      - 0.004
      - 0.016
      - 0.02
      - 0.022
      - 0.0105
      - 0.01
      - 0.01
      - 0.01
      - 0.003
      - 0.00325
      - 0.00325
      - 0.00325
      - 0.00325
      - 0.00325
      - 0.00225
      - 0.01
      - 0.017
      - 0.025
      - 0.013
      - 0.013
      - 0.007
      - 0.018
      - 0.006
      - 0.006
      - 0.003
      - 0.001
      - 0.004
      - 0.0008
      - 0.005
      - 0.006
      - 0.0008
      - 0.0005
      - 0.0007
      - 0.006
      - 0.0005
      - 0.0055
      - 0.0032
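      # The 41 weights above map 1:1 onto the 41 entries of dataset_read_path /
      # dataset_folder and sum to 1.0; fineweb-edu (0.333) and dclm (0.37) alone
      # account for roughly 70% of the mixture.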
      pad_samples_to_global_batch_size: false
      return_positions: true
      token_size_in_bytes: 4
      tokenizer_name: meta-llama/Llama-3.2-1B
      use_old_brrr_dataloader: false
      vocab_size: 128256
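      # vocab_size 128256 does not fit in 2 bytes (max 65536), which is
      # presumably why token_size_in_bytes is 4 for the tokenized shards.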
    num_loading_workers: 0
    seed: 6
  name: stable
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: smollm3-3B-final
  run: elie-48n-1704-part1
  seed: 6
  step: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 50
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    _attn_implementation: flash_attention_2
    _fused_rms_norm: true
    _fused_rotary_emb: true
    _use_doc_masking: true
    _use_qkv_packed: true
    attention_bias: false
    bos_token_id: 128000
    eos_token_id: 128001
    flex_attention_mask: null
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 11008
    is_qwen2_config: true
    max_position_embeddings: 4096
    moe_config: null
    num_attention_heads: 16
    num_hidden_layers: 36
    num_key_value_heads: 4
    pad_token_id: null
    pretraining_tp: 2
    rms_norm_eps: 1.0e-06
    rope_interleaved: false
    rope_scaling: null
    rope_theta: 50000.0
    sliding_window_size: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 128256
    z_loss_coefficient: 1.0e-05
    z_loss_enabled: false
    no_rope_layer: 4
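  # A ~3B-parameter Llama-style decoder: 36 layers x 2048 hidden, GQA with
  # 16 query / 4 KV heads, tied embeddings over a 128k vocab. no_rope_layer: 4
  # presumably drops RoPE on every 4th layer (SmolLM3's NoPE scheme).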
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0002
    lr_decay_starting_step: 2600000
    lr_decay_steps: 600000
    lr_decay_style: linear
    lr_warmup_steps: 2000
    lr_warmup_style: linear
    min_decay_lr: 0
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
    weight_decay: 0.1
  weight_decay_exclude_named_params:
  - .*token_embedding.*
  zero_stage: 0
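# LR schedule: linear warmup to 2e-4 over the first 2,000 steps, constant
# until step 2,600,000, then linear decay to 0 over 600,000 steps, ending
# exactly at train_steps (3,200,000) below.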
parallelism:
  context_parallel_size: 1
  dp: 192
  expert_parallel_size: 1
  moe_layer_recompute: false
  pp: 1
  pp_engine: 1f1b
  recompute_layer: false
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true
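# World size: dp * tp * pp = 192 * 2 * 1 = 384 GPUs, i.e. 48 nodes of 8 GPUs,
# consistent with the "48n" run name and lighteval.slurm.gpus_per_node below.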
profiler: null
s3_upload:
  remove_after_upload: true
  s5cmd_concurrency: 5
  s5cmd_numworkers: 16
  s5cmd_path: /fsx/elie_bakouch/smollm3_training/1004-nn/1004-hope/bin/s5cmd
  upload_s3_path: s3://smollm3/tp-fix-final-pre-training/1704-48n-part1
tokenizer:
  tokenizer_max_length: 4096
  tokenizer_name_or_path: meta-llama/Llama-3.2-1B
  tokenizer_revision: null
metrics_logging:
  log_level: 1
  log_detail_interval: 200
tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 3
  sequence_length: 4096
  train_steps: 3200000
  val_check_interval: 100
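# Global batch: 192 dp * 3 micro_batch * 1 accumulation = 576 sequences of
# 4096 tokens, i.e. 2,359,296 (~2.36M) tokens per step; over the full
# 3,200,000-step schedule that comes to roughly 7.55T tokens.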
lighteval:
  slurm_script_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/launch-config" # Default path for launch scripts
  logs_path: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/logs" # Default path for evaluation logs
  local_checkpoint_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/evals-ckpt" # Default path for temporary checkpoint storage. Will store under {local_checkpoint_dir}/{run_name}/{step}
  nanotron_path: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron"
  output_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/results"
  s3_save_path: "s3://smollm3/eval-prod"
  upload_to_wandb: true
  wandb_project: smollm3-3B-evals
  wandb_entity: huggingface
  parallelism:
    dp: 4
    pp: 1
    tp: 2
    tp_linear_async_communication: true
  batch_size: 8 # Optional batch size for evaluation
  eval_config_override: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/smollm3_eval.yaml" # The evaluation config file to use
  eval_interval: 4000
  eval_interval_file: null
  slurm:
    gpus_per_node: 8
    partition: "hopper-prod"
    hf_cache: "/fsx/elie_bakouch/.cache/huggingface"
    cpus_per_task: 88
    qos: "normal"
    time: "01:59:00"
    reservation: smollm # Optional reservation name, can be null
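For review, a minimal sanity-check sketch (not part of the commit): it recomputes the mixture and batch arithmetic noted in the comments above, assuming PyYAML is installed and a local copy of the file exists at the hypothetical path "stage1_8T.yaml".

import yaml

with open("stage1_8T.yaml") as f:  # hypothetical local path
    cfg = yaml.safe_load(f)

dataset = cfg["data_stages"][0]["data"]["dataset"]
paths = dataset["dataset_read_path"]
weights = dataset["dataset_weights"]

# One weight per dataset; the mixture should sum to 1.0 (up to rounding).
assert len(paths) == len(weights) == 41
assert abs(sum(weights) - 1.0) < 1e-6

# Tokens per optimizer step = dp * micro_batch * grad_accum * sequence_length.
tok = cfg["tokens"]
tokens_per_step = (cfg["parallelism"]["dp"] * tok["micro_batch_size"]
                   * tok["batch_accumulation_per_replica"]
                   * tok["sequence_length"])
print(f"{tokens_per_step:,} tokens/step")                       # 2,359,296
print(f"{tokens_per_step * tok['train_steps']:,} tokens total") # ~7.55e12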