KMasaki committed · Commit ae49d6c · verified · 1 Parent(s): 68d29b5

Model save
README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: Qwen/Qwen2.5-1.5B-Instruct
-datasets: HuggingFaceH4/Bespoke-Stratos-17k
 library_name: transformers
 model_name: Qwen2.5-1.5B-Open-R1-Distill
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - sft
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for Qwen2.5-1.5B-Open-R1-Distill
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) on the [HuggingFaceH4/Bespoke-Stratos-17k](https://huggingface.co/datasets/HuggingFaceH4/Bespoke-Stratos-17k) dataset.
+This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/kawamuramasaki/open-r1/runs/gkc638sj)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/kawamuramasaki/open-r1/runs/jv1rnqpw)
 
 
 This model was trained with SFT.
@@ -37,9 +35,9 @@ This model was trained with SFT.
 ### Framework versions
 
 - TRL: 0.16.0.dev0
-- Transformers: 4.49.0.dev0
+- Transformers: 4.49.0
 - Pytorch: 2.5.1
-- Datasets: 3.3.0
+- Datasets: 3.3.2
 - Tokenizers: 0.21.0
 
 ## Citations
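The card's quick-start section is unchanged context in this diff (it ends in the `print(output["generated_text"])` line referenced in the hunk header above) and typically follows the TRL model-card template. A minimal sketch of what it looks like, assuming the published repo id `KMasaki/Qwen2.5-1.5B-Open-R1-Distill` and an illustrative prompt, neither of which is shown in the diff:

```python
# Sketch of the TRL-template quick start; repo id and prompt are assumptions.
from transformers import pipeline

question = "If you had a time machine, but could only go to the past or the future once, which would you choose and why?"
generator = pipeline("text-generation", model="KMasaki/Qwen2.5-1.5B-Open-R1-Distill", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```

The card also states the model was trained with SFT via TRL (0.16.0.dev0). A minimal sketch of that setup using TRL's `SFTTrainer`; the dataset and arguments here are placeholders, not this run's actual configuration (note the dataset field was removed from the card in this commit):

```python
# Sketch of SFT with TRL's SFTTrainer; dataset and args are placeholders.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

train_dataset = load_dataset("trl-lib/Capybara", split="train")  # placeholder dataset
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-1.5B-Instruct",  # base model from the card
    train_dataset=train_dataset,
    args=SFTConfig(output_dir="Qwen2.5-1.5B-Open-R1-Distill"),
)
trainer.train()
```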
all_results.json CHANGED
@@ -1,13 +1,8 @@
 {
-    "eval_loss": 0.7649670839309692,
-    "eval_runtime": 2.4402,
-    "eval_samples": 100,
-    "eval_samples_per_second": 52.866,
-    "eval_steps_per_second": 3.688,
-    "total_flos": 76774385909760.0,
-    "train_loss": 0.7816428508532154,
-    "train_runtime": 1403.6888,
-    "train_samples": 16610,
-    "train_samples_per_second": 15.404,
-    "train_steps_per_second": 0.24
+    "total_flos": 488051495141376.0,
+    "train_loss": 0.6885459087119324,
+    "train_runtime": 16426.9677,
+    "train_samples": 93733,
+    "train_samples_per_second": 33.366,
+    "train_steps_per_second": 0.261
 }
generation_config.json CHANGED
@@ -10,5 +10,5 @@
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
-  "transformers_version": "4.49.0.dev0"
+  "transformers_version": "4.49.0"
 }
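Only the `transformers_version` field changes here; the sampling defaults (`temperature` 0.7, `top_k` 20, `top_p` 0.8) carry over. `generate()` picks these up automatically from `generation_config.json`; a minimal sketch passing them explicitly, with the repo id again assumed:

```python
# Sketch: applying the generation_config.json sampling settings explicitly.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "KMasaki/Qwen2.5-1.5B-Open-R1-Distill"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Give me a short introduction to large language models.", return_tensors="pt")
output_ids = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.7,  # values mirrored from generation_config.json
    top_k=20,
    top_p=0.8,
    max_new_tokens=64,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```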
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "total_flos": 76774385909760.0,
-    "train_loss": 0.7816428508532154,
-    "train_runtime": 1403.6888,
-    "train_samples": 16610,
-    "train_samples_per_second": 15.404,
-    "train_steps_per_second": 0.24
+    "total_flos": 488051495141376.0,
+    "train_loss": 0.6885459087119324,
+    "train_runtime": 16426.9677,
+    "train_samples": 93733,
+    "train_samples_per_second": 33.366,
+    "train_steps_per_second": 0.261
 }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff