HaichuanWang committed (verified)
Commit f089127 · 1 Parent(s): 6e27b0b

Model save

README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: Qwen/Qwen2.5-Math-7B
-datasets: DigitalLearningGmbH/MATH-lighteval
 library_name: transformers
 model_name: Qwen-2.5-7B-Simple-RL
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - grpo
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for Qwen-2.5-7B-Simple-RL
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B) on the [DigitalLearningGmbH/MATH-lighteval](https://huggingface.co/datasets/DigitalLearningGmbH/MATH-lighteval) dataset.
+This model is a fine-tuned version of [Qwen/Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
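The Quick start snippet itself lies outside these hunks; only its last line, `print(output["generated_text"])`, survives as trailing context in the next hunk header. As a minimal sketch, assuming the standard TRL card template and a hypothetical Hub id `HaichuanWang/Qwen-2.5-7B-Simple-RL` inferred from `model_name` (not confirmed by the diff), it would look roughly like:

```python
from transformers import pipeline

# Hypothetical repo id, inferred from model_name; adjust to the actual Hub path.
generator = pipeline("text-generation", model="HaichuanWang/Qwen-2.5-7B-Simple-RL")
output = generator(
    [{"role": "user", "content": "Solve: what is 13 * 17?"}],
    max_new_tokens=256,
    return_full_text=False,  # return only the completion, not the prompt
)[0]
print(output["generated_text"])
```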
@@ -29,18 +27,18 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/wanghaichuan/huggingface/runs/eovbfgxo)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/wanghaichuan/huggingface/runs/13u15bim)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
 
 ### Framework versions
 
-- TRL: 0.15.0.dev0
-- Transformers: 4.49.0.dev0
+- TRL: 0.16.0.dev0
+- Transformers: 4.49.0
 - Pytorch: 2.5.1
-- Datasets: 3.2.0
-- Tokenizers: 0.21.0
+- Datasets: 3.4.0
+- Tokenizers: 0.21.1
 
 ## Citations
 
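For context on the GRPO method the card cites: its core move is a group-relative advantage, where several completions are sampled per prompt and each completion's reward is standardized against its own group, replacing a learned value baseline. A minimal sketch of that normalization, based on the cited DeepSeekMath paper rather than this repo's training code:

```python
import statistics

def group_relative_advantages(rewards: list[float]) -> list[float]:
    """Standardize each completion's reward against its own group (GRPO-style)."""
    mean = statistics.mean(rewards)
    std = statistics.pstdev(rewards) or 1.0  # avoid division by zero for uniform groups
    return [(r - mean) / std for r in rewards]

# e.g. rewards for 4 sampled answers to one math prompt (1.0 = answer verified correct)
print(group_relative_advantages([1.0, 0.0, 0.0, 1.0]))  # [1.0, -1.0, -1.0, 1.0]
```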
 
all_results.json CHANGED
@@ -1,13 +1,8 @@
 {
-    "eval_loss": 0.0003125647781416774,
-    "eval_runtime": 2181.3395,
-    "eval_samples": 5000,
-    "eval_samples_per_second": 2.292,
-    "eval_steps_per_second": 0.024,
     "total_flos": 0.0,
-    "train_loss": 1.2402063848306162,
-    "train_runtime": 35030.8408,
+    "train_loss": 0.04672712115020638,
+    "train_runtime": 102271.8167,
     "train_samples": 7500,
-    "train_samples_per_second": 0.214,
-    "train_steps_per_second": 0.013
+    "train_samples_per_second": 0.073,
+    "train_steps_per_second": 0.009
 }
config.json CHANGED
@@ -19,11 +19,11 @@
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 10000,
-  "sliding_window": null,
+  "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.49.0.dev0",
-  "use_cache": true,
+  "transformers_version": "4.49.0",
+  "use_cache": false,
   "use_mrope": false,
   "use_sliding_window": false,
   "vocab_size": 152064
generation_config.json CHANGED
@@ -2,5 +2,5 @@
   "bos_token_id": 151643,
   "eos_token_id": 151643,
   "max_new_tokens": 2048,
-  "transformers_version": "4.49.0.dev0"
+  "transformers_version": "4.49.0"
 }
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e69be4d87367acb672fc350b2f7046dd9e2cf9ec618cc0dec52498909317321
+oid sha256:7dcc0bde40bdebe6c68a646753ed9c5a622e3ee940a1d93996cf3a2818fd425e
 size 4877660776
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3458aa7ffac11ae8ca5246d72a62a9f8ec01bf58697b62c79c6a7768d1339b2b
+oid sha256:32d6e2b1ded3b5ac1353a256c3c88dcf8a34c5a35bf230f0383023cf8680e33c
 size 4932751008
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5643906fe24b7997e2b7526af64e1c67d031a5c00322502a2da8baaf08965da6
+oid sha256:d0e6d03348992189553c9e4098e91b3c1dd0cea3a1856c8a69824a82c0fa6d2a
 size 4330865200
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d61362ab237cfd9e3b5983d8f2fca8919a1eda6fd17ca3a62e568b5a98e6542
+oid sha256:559b09a7573e2b791190f0929c6f09e757985fed99f102ea89042f0040a4eab9
 size 1089994880
tokenizer_config.json CHANGED
@@ -202,7 +202,6 @@
   "extra_special_tokens": {},
   "model_max_length": 131072,
   "pad_token": "<|endoftext|>",
-  "padding_side": "left",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
   "unk_token": null
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 1.2402063848306162,
-    "train_runtime": 35030.8408,
+    "train_loss": 0.04672712115020638,
+    "train_runtime": 102271.8167,
     "train_samples": 7500,
-    "train_samples_per_second": 0.214,
-    "train_steps_per_second": 0.013
+    "train_samples_per_second": 0.073,
+    "train_steps_per_second": 0.009
 }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4168a8b1403b82181fdcb547faea4b2772074aaa3495cbe3296d7263bc74c14
-size 7480
+oid sha256:c079e39586a2dd2e18ea4f75e053a693fcb56f1cf9f4a09a9df76f1d1ec00fcb
+size 7992