eliebak (HF Staff) committed
Commit d99ed92 (verified) · 1 parent: e16256f

Create lc_32k

Files changed (1): lc_32k (+326, -0)
lc_32k ADDED
@@ -0,0 +1,326 @@
checkpoints:
  checkpoint_interval: 2000
  checkpoints_path: /scratch/elie-checkpoints-stage2-lc
  checkpoints_path_is_shared_file_system: false
  load_lr_scheduler: true
  load_optimizer: true
  resume_checkpoint_path: s3://smollm3/tp-fix-final-pre-training/elie-lc-prolong-think-chatml-mix01
  save_final_state: true
  save_initial_state: false
data_stages:
- data:
    dataset:
      dataset_read_path: # replace some programming languages with stack-edu-real, add finemath4+ and infiwebmath4+ and MegaMath, downsample some sources like SE
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fineweb-edu
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/dclm
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/pes2o
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/wiki
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/mwiki
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/cosmopedia2
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stackexchange
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-fra_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-spa_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-deu_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-ita_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-por_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-cmn_Hani
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-rus_Cyrl
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-arb_Arab
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-jpn_Jpan
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-kor_Hang
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hin_Deva
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-tha_Thai
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-vie_Latn
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/fw2-hq-ell_Grek
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/infiwebmath-3plus
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/finemath-3plus
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/infiwebmath-4plus
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/finemath-4plus
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/megamath-web-pro
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/megamath-qa-qwen
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/megamath-text-code-block
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Python
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Java
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-JavaScript
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-C
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Cpp
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-C-Sharp
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-PHP
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-TypeScript
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Swift
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-SQL
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Ruby
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Markdown
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-HTML
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Rust
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Go
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/stack-edu-real-Shell
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/pull-requests
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/kaggle
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/jupyter-scripts
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/github-issues
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/openmathinstruct-2
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/openmathreasoning-4k
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/opencodereasoning-4k
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/natural_reasoning
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/problem-solving
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/short-context/2students
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/long-context/reasoning_nochatml_17M_4k_to_32k-correct_cpu_tok_4k_to_32k
      - /fsx/elie_bakouch/smollm3_training/1004-nn/data/tokenized_data/long-context/reasoning_tulu_chatml_580k_all_lengths_cpu_tok_4k_to_32k
      dataset_folder:
      - s3://smollm3/datasets/llama_tokenized_32769/fineweb-edu/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/dclm/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/pes2o/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/wiki/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/mwiki/standard
      - s3://smollm3/datasets/llama_tokenized_32769/cosmopedia2/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/stackexchange/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-fra_Latn/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-spa_Latn/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-deu_Latn/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-ita_Latn/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-por_Latn/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-cmn_Hani/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-rus_Cyrl/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-arb_Arab/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-jpn_Jpan/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-kor_Hang/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hin_Deva/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-tha_Thai/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-vie_Latn/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/fw2-hq-ell_Grek/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/infiwebmath-3plus/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/finemath-3plus/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/infiwebmath-4plus/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/finemath-4plus/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/megamath-web-pro/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/megamath-qa-qwen/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/megamath-text-code-block/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Python/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Java/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-JavaScript/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-C/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Cpp/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-C-Sharp/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-PHP/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-TypeScript/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Swift/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-SQL/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Ruby/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Markdown/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-HTML/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Rust/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Go/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/stack-edu-real-Shell/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/pull-requests/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/kaggle/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/jupyter-scripts/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/github-issues/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/openmathinstruct-2/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/openmathreasoning-4k/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/opencodereasoning-4k/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/natural_reasoning/standard/
      - s3://smollm3/datasets/llama_tokenized_32769/problem-solving/tokenized/
      - s3://smollm3/datasets/llama_tokenized_32769/2students/tokenized/
      - s3://smollm3/datasets/elie_correct_llama_tokenized_32769/reasoning_nochatml_17M_4k_to_32k-correct_cpu_tok_4k_to_32k/tokenized/
      - s3://smollm3/datasets/elie_correct_llama_tokenized_32769/reasoning_tulu_chatml_580k_all_lengths_cpu_tok_4k_to_32k/tokenized/
      dataset_weights:
      - 0.1253669
      - 0.2686434
      - 0.0017910 # pes2o
      - 0.0001791 # Wiki
      - 0.0071638 # mwiki
      - 0.0035819 # cosmopedia
      - 0.0008955 # SE
      - 0.0161186
      - 0.0197005
      - 0.0205960
      - 0.0111935
      - 0.0040297
      - 0.0089548
      - 0.0089548
      - 0.0080593
      - 0.0028655
      - 0.0028655
      - 0.0028655
      - 0.0028655
      - 0.0000448 # downsample viet, too many epochs
      - 0.0019701
      - 0.0017910 # update infiwebmath3+
      - 0.0017910 # update finemath3+
      - 0.0179096 # add infiwebmath4+
      - 0.0223869 # add finemath4+
      - 0.0125367 # add MegamathWeb-Pro
      - 0.0017910 # MegaMathQA-Qwen
      - 0.0626834 # add MegaMath-Text-Code
      - 0.0805930 # add stack-edu-real-shuffled-Python
      - 0.0161186 # Java
      - 0.0161186 # JS
      - 0.0071638 # C
      - 0.0394010 # add stack-edu-real-shuffled-Cpp
      - 0.0053729 # C# Edu
      - 0.0053729 # PHP
      - 0.0026864 # TS
      - 0.0017910 # Swift
      - 0.0116412 # SQL
      - 0.0008955 # Ruby
      - 0.0044774 # Md
      - 0.0091339 # HTML
      - 0.0008955 # Rust
      - 0.0004477 # Go
      - 0.0053729 # Shell
      - 0.0044774 # PRs
      - 0.0005373 # kaggle
      - 0.0107457 # notebooks
      - 0.0035819 # issues
      - 0.0223869 # OpenMathInstruct
      - 0.0044774 # OpenMathReasoning
      - 0.0004477 # OpenCodeReasoning
      - 0.0008955 # Natural reasoning
      - 0.0026864 # Olmo Problem solving
      - 0.0026864 # Olmo two students
      - 0.09 # 0.1 * 0.2 * 0.75 DCLM 16k
      - 0.01 # 0.1 * 0.2 * 0.75 DCLM 16k
      pad_samples_to_global_batch_size: false
      return_positions: true
      token_size_in_bytes: 4
      tokenizer_name: HuggingFaceTB/SmolLM3-11T-32k-v1-remote-code
      use_old_brrr_dataloader: false
      vocab_size: 128256
    num_loading_workers: 0
    seed: 6
  name: lc stage
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: smollm3-3B-final
  run: elie-lc-prolong-think-chatml-mix01
  seed: 6
  step: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 50
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    _attn_implementation: llama3_ring_attention
    _fused_rms_norm: true
    _fused_rotary_emb: true
    _use_doc_masking: true
    _use_qkv_packed: true
    ring_attn_heads_k_stride: 1
    attention_bias: false
    bos_token_id: 128000
    eos_token_id: 128001
    flex_attention_mask: null
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 11008
    is_qwen2_config: true
    max_position_embeddings: 32768
    moe_config: null
    num_attention_heads: 16
    num_hidden_layers: 36
    num_key_value_heads: 4
    pad_token_id: null
    pretraining_tp: 2
    rms_norm_eps: 1.0e-06
    rope_interleaved: false
    rope_scaling: null
    rope_theta: 2000000.0
    sliding_window_size: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 128256
    z_loss_coefficient: 1.0e-05
    z_loss_enabled: false
    no_rope_layer: 4
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.00002
    lr_decay_starting_step: 1000
    lr_decay_steps: 19000
    lr_decay_style: cosine
    lr_warmup_steps: 1000
    lr_warmup_style: linear
    min_decay_lr: 0
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.1
  weight_decay_exclude_named_params:
  - .*token_embedding.*
  zero_stage: 0
parallelism:
  context_parallel_size: 4
  dp: 12
  expert_parallel_size: 1
  moe_layer_recompute: false
  pp: 1
  pp_engine: 1f1b
  recompute_layer: false
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true
s3_upload:
  remove_after_upload: true
  s5cmd_concurrency: 5
  s5cmd_numworkers: 16
  s5cmd_path: /fsx/elie_bakouch/smollm3_training/1004-nn/1004-hope/bin/s5cmd
  upload_s3_path: s3://smollm3/tp-fix-final-pre-training/elie-lc-prolong-think-chatml-mix01
tokenizer:
  tokenizer_max_length: 32768
  tokenizer_name_or_path: HuggingFaceTB/SmolLM3-11T-32k-v1-remote-code
  tokenizer_revision: null
metrics_logging:
  log_level: 1
  log_detail_interval: 200
tokens:
  batch_accumulation_per_replica: 6
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 1
  sequence_length: 32768
  train_steps: 20000
  val_check_interval: 100
lighteval:
  slurm_script_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/launch-config" # Default path for launch scripts
  logs_path: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/logs" # Default path for evaluation logs
  local_checkpoint_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/evals-ckpt" # Default path for temporary checkpoint storage. Will store under {local_checkpoint_dir}/{run_name}/{step}
  nanotron_path: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron"
  output_dir: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/eval_results/results"
  s3_save_path: "s3://smollm3/eval-prod"
  upload_to_wandb: true
  wandb_project: smollm3-3B-evals
  wandb_entity: huggingface
  parallelism:
    dp: 4
    pp: 1
    tp: 2
    tp_linear_async_communication: true
  batch_size: 8 # Optional batch size for evaluation
  eval_config_override: "/fsx/elie_bakouch/smollm3_training/1004-nn/nanotron/_final1004/smollm3_eval_decay.yaml" # The evaluation config file to use
  eval_interval: 400000
  eval_interval_file: null
  slurm:
    gpus_per_node: 8
    partition: "hopper-prod"
    hf_cache: "/fsx/elie_bakouch/.cache/huggingface"
    cpus_per_task: 88
    qos: "normal"
    time: "01:59:00"
    reservation: smollm # Optional reservation name, can be null
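
A minimal sanity-check sketch for readers of this mix (not part of the commit): it assumes the config above is saved locally under the hypothetical name lc_32k.yaml and that PyYAML is installed, checks that the per-source lists stay aligned, reports the weight sum, and derives the tokens consumed per optimizer step from the tokens and parallelism sections.

# sanity_check_lc_32k.py -- quick checks on the long-context stage config
import yaml

with open("lc_32k.yaml") as f:  # hypothetical local copy of the file added in this commit
    cfg = yaml.safe_load(f)

dataset = cfg["data_stages"][0]["data"]["dataset"]
paths = dataset["dataset_read_path"]
folders = dataset["dataset_folder"]
weights = dataset["dataset_weights"]

# The three per-source lists must stay aligned, otherwise a weight is applied to the wrong source.
assert len(paths) == len(folders) == len(weights), (len(paths), len(folders), len(weights))
print(f"{len(weights)} sources, weight sum = {sum(weights):.6f}")

# Tokens per optimizer step = dp replicas * micro batch * grad accumulation * sequence length
# (context/tensor parallelism split work within a replica, so they do not change this count).
dp = cfg["parallelism"]["dp"]
tok = cfg["tokens"]
seqs_per_step = dp * tok["micro_batch_size"] * tok["batch_accumulation_per_replica"]
print(f"{seqs_per_step} sequences/step, about {seqs_per_step * tok['sequence_length'] / 1e6:.2f}M tokens/step")

With the values in this config that works out to 12 x 1 x 6 = 72 sequences of 32,768 tokens, roughly 2.36M tokens per optimizer step.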