RyanYr committed (verified)
Commit ee1ea77 · 1 Parent(s): 1e1f8c0

Save model at global step 1235

config.yaml ADDED
@@ -0,0 +1,227 @@
+ data:
+   tokenizer: null
+   train_files: aime24_ds_train_sample.parquet
+   val_files: matheval.parquet
+   prompt_key: prompt
+   reward_fn_key: data_source
+   max_prompt_length: 1024
+   max_response_length: 3072
+   train_batch_size: 256
+   val_batch_size: null
+   return_raw_input_ids: false
+   return_raw_chat: false
+   shuffle: true
+   filter_overlong_prompts: true
+   filter_overlong_prompts_workers: 1
+   truncation: error
+   image_key: images
+   custom_cls:
+     path: null
+     name: null
+ actor_rollout_ref:
+   hybrid_engine: true
+   model:
+     path: Qwen/Qwen2.5-Math-1.5B
+     external_lib: null
+     override_config: {}
+     enable_gradient_checkpointing: true
+     use_remove_padding: true
+     use_liger: false
+     save_hf_repo_id: RyanYr/grpo_neg0.01-aime24-qwen2.5math-1.5B-base-mbs128-n4-ref1230-975b46d_actor
+     tokenizer_chat_template: null
+   actor:
+     strategy: fsdp
+     ppo_mini_batch_size: 128
+     ppo_micro_batch_size: null
+     ppo_micro_batch_size_per_gpu: 16
+     use_dynamic_bsz: false
+     ppo_max_token_len_per_gpu: 16384
+     grad_clip: 1.0
+     clip_ratio: 0.2
+     clip_ratio_low: 0.2
+     clip_ratio_high: 0.2
+     clip_ratio_c: 3.0
+     loss_agg_mode: token-mean
+     entropy_coeff: 0
+     use_kl_loss: true
+     use_torch_compile: true
+     kl_loss_coef: 0.001
+     kl_loss_type: low_var_kl
+     ppo_epochs: 1
+     shuffle: false
+     ulysses_sequence_parallel_size: 1
+     checkpoint:
+       contents:
+       - model
+       - optimizer
+       - extra
+     optim:
+       lr: 1.0e-06
+       lr_warmup_steps: -1
+       lr_warmup_steps_ratio: 0.0
+       min_lr_ratio: null
+       warmup_style: constant
+       total_training_steps: 2000
+       weight_decay: 0.01
+     fsdp_config:
+       wrap_policy:
+         min_num_params: 0
+       param_offload: false
+       optimizer_offload: false
+       fsdp_size: -1
+   ref:
+     ref_model_path: RyanYr/grpo-aime24-qwen2.5math-1.5B-base-mbs128-n4_actor_1230-975b46d
+     strategy: fsdp
+     fsdp_config:
+       param_offload: false
+       wrap_policy:
+         min_num_params: 0
+     log_prob_micro_batch_size: null
+     log_prob_micro_batch_size_per_gpu: 64
+     log_prob_use_dynamic_bsz: false
+     log_prob_max_token_len_per_gpu: 16384
+     ulysses_sequence_parallel_size: 1
+   rollout:
+     name: vllm
+     temperature: 1.0
+     top_k: -1
+     top_p: 1
+     use_fire_sampling: false
+     prompt_length: 1024
+     response_length: 3072
+     dtype: bfloat16
+     gpu_memory_utilization: 0.75
+     ignore_eos: false
+     enforce_eager: false
+     free_cache_engine: false
+     load_format: dummy_dtensor
+     tensor_model_parallel_size: 4
+     max_num_batched_tokens: 4096
+     max_model_len: null
+     max_num_seqs: 1024
+     log_prob_micro_batch_size: null
+     log_prob_micro_batch_size_per_gpu: 64
+     log_prob_use_dynamic_bsz: false
+     log_prob_max_token_len_per_gpu: 16384
+     disable_log_stats: true
+     enable_chunked_prefill: true
+     do_sample: true
+     'n': 4
+     engine_kwargs:
+       swap_space: null
+     val_kwargs:
+       top_k: -1
+       top_p: 1.0
+       temperature: 0
+       'n': 1
+       do_sample: false
+ critic:
+   rollout_n: 4
+   strategy: fsdp
+   optim:
+     lr: 1.0e-05
+     lr_warmup_steps_ratio: 0.0
+     min_lr_ratio: null
+     warmup_style: constant
+     total_training_steps: 2000
+     weight_decay: 0.01
+   model:
+     path: ~/models/deepseek-llm-7b-chat
+     tokenizer_path: Qwen/Qwen2.5-Math-1.5B
+     override_config: {}
+     external_lib: null
+     enable_gradient_checkpointing: true
+     use_remove_padding: false
+     fsdp_config:
+       param_offload: false
+       optimizer_offload: false
+       wrap_policy:
+         min_num_params: 0
+       fsdp_size: -1
+     save_hf_repo_id: null
+   ppo_mini_batch_size: 128
+   ppo_micro_batch_size: null
+   ppo_micro_batch_size_per_gpu: null
+   forward_micro_batch_size: null
+   forward_micro_batch_size_per_gpu: null
+   use_dynamic_bsz: false
+   ppo_max_token_len_per_gpu: 32768
+   forward_max_token_len_per_gpu: 32768
+   ulysses_sequence_parallel_size: 1
+   ppo_epochs: 1
+   shuffle: false
+   grad_clip: 1.0
+   cliprange_value: 0.5
+   checkpoint:
+     contents:
+     - model
+     - optimizer
+     - extra
+ reward_model:
+   enable: false
+   strategy: fsdp
+   model:
+     input_tokenizer: Qwen/Qwen2.5-Math-1.5B
+     path: ~/models/FsfairX-LLaMA3-RM-v0.1
+     external_lib: null
+     use_remove_padding: false
+     fsdp_config:
+       wrap_policy:
+         min_num_params: 0
+       param_offload: false
+       fsdp_size: -1
+   micro_batch_size: null
+   micro_batch_size_per_gpu: null
+   max_length: null
+   ulysses_sequence_parallel_size: 1
+   use_dynamic_bsz: false
+   forward_max_token_len_per_gpu: 32768
+   reward_manager: prime
+ custom_reward_function:
+   path: null
+   name: compute_score
+ algorithm:
+   gamma: 1.0
+   lam: 1.0
+   adv_estimator: grpo_neg
+   use_kl_in_reward: false
+   grpo_neg:
+     mean_penalty: 0.01
+   kl_penalty: kl
+   kl_ctrl:
+     type: fixed
+     kl_coef: 0.001
+     horizon: 10000
+     target_kl: 0.1
+ trainer:
+   balance_batch: true
+   total_epochs: 1000000000000
+   total_training_steps: 2000
+   project_name: value-LLM
+   experiment_name: grpo_neg0.01-aime24-qwen2.5math-1.5B-base-mbs128-n4-ref1230-975b46d
+   logger:
+   - console
+   - wandb
+   log_val_generations: 0
+   nnodes: 1
+   n_gpus_per_node: 4
+   save_freq: 5
+   resume_mode: auto
+   resume_from_path: null
+   val_before_train: false
+   test_freq: -1
+   critic_warmup: 0
+   default_hdfs_dir: null
+   del_local_ckpt_after_load: false
+   default_local_dir: checkpoints/value-LLM/grpo_neg0.01-aime24-qwen2.5math-1.5B-base-mbs128-n4-ref1230-975b46d
+   max_actor_ckpt_to_keep: 1
+   max_critic_ckpt_to_keep: 1
+   ray_wait_register_center_timeout: 300
+   hf_token: null
+   resume_from_hf:
+     enable: true
+     actor_hf_repo_id: RyanYr/grpo-aime24-qwen2.5math-1.5B-base-mbs128-n4-ref895-82bb89a_actor
+     actor_revision: 975b46d1ee3ee658c46b85220a34a95c384f4078
+     critic_hf_repo_id: null
+     critic_revision: main
+     hf_token: null
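
The config.yaml above is the full training configuration saved alongside this checkpoint. As a minimal sketch (not part of the commit), one way to pull and inspect it, assuming the checkpoint lives in the repo named by `save_hf_repo_id` in the config and that `huggingface_hub` and `omegaconf` are installed:

```python
# Hypothetical inspection of the saved config; repo id and filename are
# taken from this commit, everything else is illustrative.
from huggingface_hub import hf_hub_download
from omegaconf import OmegaConf

cfg_path = hf_hub_download(
    repo_id="RyanYr/grpo_neg0.01-aime24-qwen2.5math-1.5B-base-mbs128-n4-ref1230-975b46d_actor",
    filename="config.yaml",
)
cfg = OmegaConf.load(cfg_path)

# A few of the values recorded above.
print(cfg.actor_rollout_ref.model.path)   # Qwen/Qwen2.5-Math-1.5B
print(cfg.algorithm.adv_estimator)        # grpo_neg
print(cfg.trainer.save_freq)              # 5
```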
data.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bc1bd198dfb33b20ea3ec9c6472f90016c8013b8bd571a556148caa4054af8a
+ size 1492
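
data.pt (and the .pt files below) are stored through Git LFS, so the three lines above are only a pointer: the spec version, the SHA-256 object id, and the byte size; the 1492-byte blob itself lives in LFS storage. A small illustrative parser for that pointer layout (the helper name is hypothetical, the key/value format follows the Git LFS spec):

```python
# Sketch of parsing a Git LFS pointer file such as data.pt above.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:9bc1bd198dfb33b20ea3ec9c6472f90016c8013b8bd571a556148caa4054af8a
size 1492
"""
print(parse_lfs_pointer(pointer))  # {'version': ..., 'sha256': '9bc1bd...', 'size_bytes': 1492}
```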
extra_state_world_size_4_rank_0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:128166d590b7b4c0ece3cac613ae83a5b32b8a808475bdf746d97e9cb75e7604
+ size 14632
extra_state_world_size_4_rank_1.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57f4f8e9a23b316750904c6ea4eaf1dc2ee49bbc7ae854e41de331e8261930dd
+ size 14632
extra_state_world_size_4_rank_2.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac50bfad296a68ebe814cae144038535c2525f2de2342aba397a420a7460d806
+ size 14632
extra_state_world_size_4_rank_3.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eafa30e154fae36c6e9384b02baab8b66a392e6e7edaf354d5f0eb0722512f3c
+ size 14632
model_world_size_4_rank_0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e76e166aedfba42b626aa7f805b702ddc8da8b0a16301f00dbd52ff0ba79de30
+ size 1777276538
model_world_size_4_rank_1.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bd98b5f6c4f68a6763498b6fe673fef4a9227bba8ff23ffac56e55de7d7f040
+ size 1777276538
model_world_size_4_rank_2.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de07ff493005f7cfabb2b455d0874c859a05cbcc84a4d0dfe8819103bf64f1a0
+ size 1777276538
model_world_size_4_rank_3.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cea24478fe54f2d20d8f05053e58ff9ddb4dad0dd5a8484dc6343735a5f2c467
+ size 1777276538
optim_world_size_4_rank_0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3681059ac0968807dd1023bf2df98d51db34b6b2d7aafd4b95ac8367d7ebe861
+ size 3087454775
optim_world_size_4_rank_1.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5990306ed23970b15fb6729d5e44118008fcf5e42189556ed85877d7a060ffbd
+ size 3087454775
optim_world_size_4_rank_2.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:950aff4abb79f05a1c676918bf5775f0ab26afb7f8603b9cfa239bbdcc88a3e6
+ size 3087454775
optim_world_size_4_rank_3.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3cba4b3950d3de6548265c62252c922ef5b90f7bd71dc369ae4a4b9e9fa7bd01
+ size 3087454775
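
The model_*, optim_*, and extra_state_* files are per-rank shards from a 4-GPU FSDP run (world size 4, one file per rank), matching `trainer.n_gpus_per_node: 4` in the config. A hedged sketch of peeking into one shard after it has been fetched from LFS, assuming each .pt file is an ordinary torch-serialized object (the exact layout is defined by the training code and is not shown in this commit):

```python
# Hypothetical local inspection of one shard; the filename is taken from this commit.
import torch

shard = torch.load(
    "model_world_size_4_rank_0.pt",
    map_location="cpu",
    weights_only=False,  # the shard may contain non-tensor bookkeeping objects
)
print(type(shard))

# If the shard is a mapping of parameter names to tensors, list a few entries.
if isinstance(shard, dict):
    for name, value in list(shard.items())[:5]:
        shape = tuple(value.shape) if hasattr(value, "shape") else type(value).__name__
        print(name, shape)
```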