checkpoints:
  checkpoint_interval: 2000
  checkpoints_path: /scratch/elie-checkpoints-stage2-lc
  checkpoints_path_is_shared_file_system: false
  load_lr_scheduler: false
  load_optimizer: false
  resume_checkpoint_path: s3://smollm3/tp-fix-final-pre-training/elie-lc-prolong-think-chatml-mix01/20000
  save_final_state: true
  save_initial_state: false
data_stages:
- data:
    dataset:
      dataset_read_path: # replace some programming languages with stack-edu-real, add finemath4+, infiwebmath4+ and MegaMath, downsample some sources like SE
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fineweb-edu
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/dclm
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/pes2o
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/wiki
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/mwiki
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/cosmopedia2
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stackexchange
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-fra_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-spa_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-deu_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-ita_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-por_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-cmn_Hani
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-rus_Cyrl
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-arb_Arab
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-jpn_Jpan
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-kor_Hang
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hin_Deva
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-tha_Thai
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-vie_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/fw2-hq-ell_Grek
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/infiwebmath-3plus
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/finemath-3plus
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/infiwebmath-4plus/tokenized
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/finemath-4plus
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/megamath-web-pro
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/megamath-qa-qwen
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/megamath-text-code-block
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Python
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Java
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-JavaScript
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-C
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Cpp
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-C-Sharp
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-PHP
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-TypeScript
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Swift
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-SQL
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Ruby
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Markdown
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-HTML
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Rust
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Go
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/stack-edu-real-Shell
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/pull-requests
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/kaggle
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/github-issues
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/openmathinstruct-2
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/openmathreasoning-4k
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/opencodereasoning
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/natural_reasoning
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/problem-solving
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/2students
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/long-context/reasoning_nochatml_17M_4k_to_32k-correct-ipc_cpu_tok_4k_to_32k
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/long-context/reasoning_tulu_chatml_580k_all_lengths-correct_cpu_tok_4k_to_32k
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/Wiki_MCQ_final_14M
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data_64k/short-context/code_sft_tulu_nemotron_180k
      dataset_folder:
      - s3://smollm3/datasets/llama_tokenized_65537/fineweb-edu/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/dclm_correct/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/pes2o/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/wiki/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/mwiki/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/cosmopedia2/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stackexchange/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-fra_Latn/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-spa_Latn/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-deu_Latn/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-ita_Latn/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-por_Latn/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-cmn_Hani/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-rus_Cyrl/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-arb_Arab/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-jpn_Jpan/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-kor_Hang/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hin_Deva/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-tha_Thai/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-vie_Latn/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/fw2-hq-ell_Grek/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/infiwebmath-3plus/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/finemath-3plus/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/infiwebmath-4plus/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/finemath-4plus/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/megamath-web-pro/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/megamath-qa-qwen/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/megamath-text-code-block/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Python/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Java/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-JavaScript/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-C/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Cpp/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-C-Sharp/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-PHP/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-TypeScript/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Swift/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-SQL/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Ruby/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Markdown/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-HTML/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Rust/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Go/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/stack-edu-real-Shell/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/pull-requests/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/kaggle/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/github-issues/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/openmathinstruct-2/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/openmathreasoning-4k/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/opencodereasoning/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/natural_reasoning/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/problem-solving/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/2students/tokenized/
      - s3://smollm3/datasets/elie_correct_llama_tokenized_65537/reasoning_nochatml_17M_4k_to_32k-correct-ipc_cpu_tok_4k_to_32k/tokenized/
      - s3://smollm3/datasets/elie_correct_llama_tokenized_65537/reasoning_tulu_chatml_580k_all_lengths-correct_cpu_tok_4k_to_32k/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/Wiki_MCQ_final_14M/tokenized/
      - s3://smollm3/datasets/llama_tokenized_65537/code_sft_tulu_nemotron_180k/tokenized/
      dataset_weights:
      - 0.1519317
      - 0.2466434
      - 0.0017910 #pes2o
      - 0.0001791 # Wiki
      - 0.0021638 # mwiki remove 0.005
      - 0.0035819 # cosmopedia
      - 0.0008955 # SE
      - 0.0161186
      - 0.0197005
      - 0.0205960
      - 0.0111935
      - 0.0040297
      - 0.0089548
      - 0.0089548
      - 0.0080593
      - 0.0018655 # remove 0.001 from Jpn
      - 0.0018655 # remove 0.001 from kor
      - 0.0028655 # remove 0.001 from hin
      - 0.0028655 # remove 0.001 from thai
      - 0.0000448 # downsample viet, too many epochs
      - 0.0009701 # remove 0.001 from greek
      - 0.0017910 # update infiwebmath3+
      - 0.0017910 # update finemath3+
      - 0.0079096 # add infiwebmath4+ remove 0.01
      - 0.0223869 # add finemath4+
      - 0.0105367 # add MegamathWeb-Pro remove 0.002
      - 0.0017910 # MegaMathQA-Qwen
      - 0.0467739 # add MegaMath-Text-Code, add 0.002 = 100M tokens
      - 0.0646834 # add stack-edu-real-shuffled-Python add 0.002
      - 0.0161186 # Java
      - 0.0161186 # JS
      - 0.0071638 # C
      - 0.0394010 # add stack-edu-real-shuffled-Cpp
      - 0.0053729 # C# Edu
      - 0.0053729 # PHP
      - 0.0026864 # TS
      - 0.0017910  # Swift
      - 0.0116412 # SQL
      - 0.0008955 # Ruby
      - 0.0044774 # Md
      - 0.0091339 # HTML
      - 0.0008955 # Rust
      - 0.0004477 # Go
      - 0.0053729 # Shell
      - 0.0044774 # PRs
      - 0.0005373 # kaggle
      - 0.0015819 # issues remove 0.002, doesn't impact code perf
      - 0.0243869 # OpenMathInstruct add 0.002 = 50M tokens
      - 0.0124774 # OpenMathReasoning add 0.008 = 350M tokens
      - 0.0004477 # OpenCodeReasoning
      - 0.0008955 # Natural reasoning
      - 0.0026864 # Olmo Problem solving
      - 0.0026864 # Olmo two students
      - 0.08    # 4B Reasoning data
      - 0.01     # 500M tokens
      - 0.04 # 2B tokens from wiki mcq
      - 0.02 # 1B tokens from code tulu nemo
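      # Note: the 57 weights above correspond one-to-one, in order, to the
      # dataset_folder entries and sum to ~1.0, so each value is presumably
      # the fraction of sampled tokens drawn from that source (assuming the
      # loader treats them as normalized sampling probabilities).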
      pad_samples_to_global_batch_size: false
      return_positions: true
      token_size_in_bytes: 4
      tokenizer_name: HuggingFaceTB/smollm3-11T-4k-remote-code
      use_old_brrr_dataloader: false
      vocab_size: 128256
    num_loading_workers: 0
    seed: 6
  name: lc stage
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: smollm3-3B-final
  run: elie-lc-prolong-start_32k-5Mrope-22k-steps
  seed: 6
  step: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 50
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    _attn_implementation: llama3_ring_attention
    _fused_rms_norm: true
    _fused_rotary_emb: true
    _use_doc_masking: true
    _use_qkv_packed: true
    ring_attn_heads_k_stride: 1
    attention_bias: false
    bos_token_id: 128000
    eos_token_id: 128001
    flex_attention_mask: null
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 11008
    is_qwen2_config: true
    max_position_embeddings: 65536
    moe_config: null
    num_attention_heads: 16
    num_hidden_layers: 36
    num_key_value_heads: 4
    pad_token_id: null
    pretraining_tp: 2
    rms_norm_eps: 1.0e-06
    rope_interleaved: false
    rope_scaling: null
    rope_theta: 5000000.0
    sliding_window_size: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 128256
    z_loss_coefficient: 1.0e-05
    z_loss_enabled: false
    no_rope_layer: 4
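    # Back-of-envelope size for this config (rough estimate, not from the file):
    # 36 layers x (GQA attention ~10.5M params with 16 query / 4 KV heads
    # + SwiGLU MLP 3 * 2048 * 11008 ~67.6M) + tied embedding 2048 * 128256
    # ~262.7M gives roughly 3.07B parameters, consistent with the 3B run name.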
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.00002
    lr_decay_starting_step: 1000
    lr_decay_steps: 21000
    lr_decay_style: cosine
    lr_warmup_steps: 1000
    lr_warmup_style: linear
    min_decay_lr: 0
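    # Assuming the standard linear-warmup + cosine schedule, the LR ramps from
    # 0 to 2e-5 over steps 0-1000, then decays as
    # lr(t) = 0.5 * 2e-5 * (1 + cos(pi * (t - 1000) / 21000))
    # down to min_decay_lr = 0 at step 22000 (sketch, not verified against the
    # nanotron scheduler implementation).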
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.1
  weight_decay_exclude_named_params:
  - .*token_embedding.* 
  zero_stage: 0
parallelism:
  context_parallel_size: 4
  dp: 12
  expert_parallel_size: 1
  moe_layer_recompute: false
  pp: 1
  pp_engine: 1f1b
  recompute_layer: true
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true
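  # This layout implies dp * tp * context_parallel * pp = 12 * 2 * 4 * 1 = 96 ranks,
  # i.e. 12 nodes of 8 GPUs, assuming one rank per GPU.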
s3_upload:
  remove_after_upload: true
  s5cmd_concurrency: 5
  s5cmd_numworkers: 16
  s5cmd_path: /fsx/elie_bakouch/smollm3_training/1004-nn/1004-hope/bin/s5cmd
  upload_s3_path: s3://smollm3/tp-fix-final-pre-training/elie-lc-prolong-start_32k-5Mrope-22k-steps
tokenizer:
  tokenizer_max_length: 65536
  tokenizer_name_or_path: HuggingFaceTB/smollm3-11T-4k-remote-code
  tokenizer_revision: null
metrics_logging:
  log_level: 1
  log_detail_interval: 200
tokens:
  batch_accumulation_per_replica: 3
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 1
  sequence_length: 65536
  train_steps: 22000
  val_check_interval: 100
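  # Global batch per step = dp * batch_accumulation_per_replica * micro_batch_size
  # = 12 * 3 * 1 = 36 sequences of 65536 tokens (~2.36M tokens/step, assuming
  # context parallelism splits each sequence rather than adding sequences),
  # i.e. roughly 52B tokens over the 22000 train_steps.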
lighteval:
  slurm_script_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/launch-config"  # Default path for launch scripts
  logs_path: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/logs"  # Default path for evaluation logs
  local_checkpoint_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/evals-ckpt"  # Default path for temporary checkpoint storage. Will store under {local_checkpoint_dir}/{run_name}/{step}
  nanotron_path: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron"
  output_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/results"
  s3_save_path: "s3://smollm3/eval-prod"
  upload_to_wandb: true
  wandb_project: smollm3-3B-evals
  wandb_entity: huggingface
  parallelism:
    dp: 4
    pp: 1
    tp: 2
    tp_linear_async_communication: true
  batch_size: 8  # Optional batch size for evaluation
  eval_config_override: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/smollm3_eval_decay.yaml"  # The evaluation config file to use
  eval_interval: 400000
  eval_interval_file: null
  slurm:
    gpus_per_node: 8
    partition: "hopper-prod"
    hf_cache: "/fsx/elie_bakouch/.cache/huggingface"
    cpus_per_task: 88
    qos: "normal"
    time: "01:59:00"
    reservation: smollm  # Optional reservation name, can be null