Haitao999 committed
Commit 6a6c687 · verified · 1 parent: 80c9aef

Model save

README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ model_name: Llama-3.1-8B-Instruct-EMPO-numia_prompt_dpo1
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Llama-3.1-8B-Instruct-EMPO-numia_prompt_dpo1
+
+ This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="Haitao999/Llama-3.1-8B-Instruct-EMPO-numia_prompt_dpo1", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/tjucsailab/huggingface/runs/pqqfe42e)
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
+ ### Framework versions
+
+ - TRL: 0.14.0
+ - Transformers: 4.48.3
+ - Pytorch: 2.5.1
+ - Datasets: 3.2.0
+ - Tokenizers: 0.21.1
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+     title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year = 2024,
+     eprint = {arXiv:2402.03300},
+ }
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title = {{TRL: Transformer Reinforcement Learning}},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year = 2020,
+     journal = {GitHub repository},
+     publisher = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
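
For context on the training procedure described in the README above, the sketch below shows how a GRPO run of this kind can be set up with TRL's `GRPOTrainer`. It is a minimal, hypothetical example: the base model, dataset, reward function, and most hyperparameters are placeholder assumptions (only the 1e-06 learning rate matches the values logged in trainer_state.json), not the actual EMPO / semantic-entropy reward setup used for this checkpoint.

```python
# Minimal GRPO sketch with TRL 0.14 (assumed setup, not the exact training script).
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

# Placeholder reward: prefers completions near 200 characters. The real run logged
# a "semantic_entropy_math_reward" (see trainer_state.json), which is not reproduced here.
def toy_reward(completions, **kwargs):
    return [-abs(len(c) - 200) / 200.0 for c in completions]

# Placeholder prompt dataset with a "prompt" column.
dataset = load_dataset("trl-lib/tldr", split="train")

training_args = GRPOConfig(
    output_dir="Llama-3.1-8B-Instruct-EMPO-numia_prompt_dpo1",
    learning_rate=1e-6,  # matches the learning rate in the training log
    num_train_epochs=1,
)

trainer = GRPOTrainer(
    model="meta-llama/Llama-3.1-8B-Instruct",  # assumed base model
    reward_funcs=toy_reward,
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```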
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 0.0,
+     "train_loss": 0.0,
+     "train_runtime": 5.6459,
+     "train_samples": 20000,
+     "train_samples_per_second": 3542.367,
+     "train_steps_per_second": 31.527
+ }
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+     "bos_token_id": 128000,
+     "do_sample": true,
+     "eos_token_id": [
+         128001,
+         128008,
+         128009
+     ],
+     "temperature": 0.6,
+     "top_p": 0.9,
+     "transformers_version": "4.48.3"
+ }
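
The generation_config.json added above is what `transformers` loads by default when generating with this checkpoint. As a quick illustration (a sketch assuming the repo id from the README and sufficient GPU memory; `device_map="auto"` requires `accelerate`), the defaults can be inspected and overridden per call:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model_id = "Haitao999/Llama-3.1-8B-Instruct-EMPO-numia_prompt_dpo1"

# The committed defaults: do_sample=True, temperature=0.6, top_p=0.9,
# plus the Llama 3.1 BOS/EOS token ids.
gen_config = GenerationConfig.from_pretrained(model_id)
print(gen_config.temperature, gen_config.top_p, gen_config.eos_token_id)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [{"role": "user", "content": "What is 17 * 23?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Arguments passed to generate() override the checked-in defaults,
# e.g. switching to greedy decoding for this call only.
output_ids = model.generate(input_ids, max_new_tokens=64, do_sample=False)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```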
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:155cb6d9f5f39a5fb88ebaa2f4050dd08807760acfe13ce10649ff4f3bcabe79
+ oid sha256:6fd2cc9a7017fd103bb057eae13ef7016842ef12d8927b90ccd6e4c61af94b66
  size 4976698672
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2e180399d64d96aba0fa151071ec28782acf2b74bd238ec680f087a049916657
+ oid sha256:13de048574c6d88724991723b0893a85edf4b6982be40f4231e09c581f9622db
  size 4999802720
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:afb9f04c12606d8c10e6755a6acd560973d0e6319662350bc5b17df768d7e9a5
+ oid sha256:bd4d7e90ac34e5bbd08b7363726ffd2041c319f0d2b5ef5fddeb97f00a584f55
  size 4915916176
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7f45e295b2ca20cd4f7bdac1aeeb2e1fcf61c8e7ea8cdea9a88553e84b79f3ed
+ oid sha256:d783f3b17b5cfb1b7afe6217a06475d9f5e30fc85fa21065195edddc2f712e63
  size 1168138808
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:65ff5472d095ccd9332d9e723153d7bc7226cb6be9c1bffda738b5ba2e71bf26
- size 17210084
+ oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
+ size 17209920
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 0.0,
+     "train_loss": 0.0,
+     "train_runtime": 5.6459,
+     "train_samples": 20000,
+     "train_samples_per_second": 3542.367,
+     "train_steps_per_second": 31.527
+ }
trainer_state.json ADDED
@@ -0,0 +1,2000 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9965010496850945,
5
+ "eval_steps": 100,
6
+ "global_step": 178,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "completion_length": 621.5997104644775,
13
+ "epoch": 0.005598320503848845,
14
+ "grad_norm": 0.018829375505447388,
15
+ "learning_rate": 1e-06,
16
+ "loss": 0.0,
17
+ "reward": 0.26314485468901694,
18
+ "reward_std": 0.09563918481580913,
19
+ "rewards/semantic_entropy_math_reward": 0.26314485468901694,
20
+ "step": 1
21
+ },
22
+ {
23
+ "completion_length": 600.1235256195068,
24
+ "epoch": 0.01119664100769769,
25
+ "grad_norm": 0.021261176094412804,
26
+ "learning_rate": 1e-06,
27
+ "loss": 0.0,
28
+ "reward": 0.2620287789031863,
29
+ "reward_std": 0.09228823287412524,
30
+ "rewards/semantic_entropy_math_reward": 0.2620287789031863,
31
+ "step": 2
32
+ },
33
+ {
34
+ "completion_length": 640.598970413208,
35
+ "epoch": 0.016794961511546535,
36
+ "grad_norm": 0.0211239755153656,
37
+ "learning_rate": 1e-06,
38
+ "loss": 0.0,
39
+ "reward": 0.2310267963912338,
40
+ "reward_std": 0.08434095047414303,
41
+ "rewards/semantic_entropy_math_reward": 0.2310267963912338,
42
+ "step": 3
43
+ },
44
+ {
45
+ "completion_length": 643.0141410827637,
46
+ "epoch": 0.02239328201539538,
47
+ "grad_norm": 0.021212320774793625,
48
+ "learning_rate": 1e-06,
49
+ "loss": 0.0,
50
+ "reward": 0.22259425604715943,
51
+ "reward_std": 0.09978899103589356,
52
+ "rewards/semantic_entropy_math_reward": 0.22259425604715943,
53
+ "step": 4
54
+ },
55
+ {
56
+ "completion_length": 623.704626083374,
57
+ "epoch": 0.02799160251924423,
58
+ "grad_norm": 0.01992746628820896,
59
+ "learning_rate": 1e-06,
60
+ "loss": 0.0,
61
+ "reward": 0.3144841371104121,
62
+ "reward_std": 0.09517159080132842,
63
+ "rewards/semantic_entropy_math_reward": 0.3144841371104121,
64
+ "step": 5
65
+ },
66
+ {
67
+ "completion_length": 602.6532821655273,
68
+ "epoch": 0.03358992302309307,
69
+ "grad_norm": 0.027432788163423538,
70
+ "learning_rate": 1e-06,
71
+ "loss": 0.0,
72
+ "reward": 0.3053075410425663,
73
+ "reward_std": 0.11108083021827042,
74
+ "rewards/semantic_entropy_math_reward": 0.3053075410425663,
75
+ "step": 6
76
+ },
77
+ {
78
+ "completion_length": 632.2589473724365,
79
+ "epoch": 0.03918824352694192,
80
+ "grad_norm": 0.024943526834249496,
81
+ "learning_rate": 1e-06,
82
+ "loss": 0.0,
83
+ "reward": 0.2508680587634444,
84
+ "reward_std": 0.06983336608391255,
85
+ "rewards/semantic_entropy_math_reward": 0.2508680587634444,
86
+ "step": 7
87
+ },
88
+ {
89
+ "completion_length": 578.0238265991211,
90
+ "epoch": 0.04478656403079076,
91
+ "grad_norm": 0.030510740354657173,
92
+ "learning_rate": 1e-06,
93
+ "loss": 0.0,
94
+ "reward": 0.3060516007244587,
95
+ "reward_std": 0.09902600082568824,
96
+ "rewards/semantic_entropy_math_reward": 0.3060516007244587,
97
+ "step": 8
98
+ },
99
+ {
100
+ "completion_length": 656.7507629394531,
101
+ "epoch": 0.05038488453463961,
102
+ "grad_norm": 0.03022916242480278,
103
+ "learning_rate": 1e-06,
104
+ "loss": 0.0,
105
+ "reward": 0.2726934589445591,
106
+ "reward_std": 0.08185443677939475,
107
+ "rewards/semantic_entropy_math_reward": 0.2726934589445591,
108
+ "step": 9
109
+ },
110
+ {
111
+ "completion_length": 687.5104351043701,
112
+ "epoch": 0.05598320503848846,
113
+ "grad_norm": 0.03694581612944603,
114
+ "learning_rate": 1e-06,
115
+ "loss": 0.0,
116
+ "reward": 0.24987599975429475,
117
+ "reward_std": 0.08242391713429242,
118
+ "rewards/semantic_entropy_math_reward": 0.24987599975429475,
119
+ "step": 10
120
+ },
121
+ {
122
+ "completion_length": 726.5305213928223,
123
+ "epoch": 0.0615815255423373,
124
+ "grad_norm": 0.033508434891700745,
125
+ "learning_rate": 1e-06,
126
+ "loss": 0.0,
127
+ "reward": 0.21453373739495873,
128
+ "reward_std": 0.08386916620656848,
129
+ "rewards/semantic_entropy_math_reward": 0.21453373739495873,
130
+ "step": 11
131
+ },
132
+ {
133
+ "completion_length": 744.8936157226562,
134
+ "epoch": 0.06717984604618614,
135
+ "grad_norm": 0.030294176191091537,
136
+ "learning_rate": 1e-06,
137
+ "loss": 0.0,
138
+ "reward": 0.196924609830603,
139
+ "reward_std": 0.08265715674497187,
140
+ "rewards/semantic_entropy_math_reward": 0.196924609830603,
141
+ "step": 12
142
+ },
143
+ {
144
+ "completion_length": 744.174861907959,
145
+ "epoch": 0.072778166550035,
146
+ "grad_norm": 0.03118356317281723,
147
+ "learning_rate": 1e-06,
148
+ "loss": 0.0,
149
+ "reward": 0.19890873844269663,
150
+ "reward_std": 0.08003655297216028,
151
+ "rewards/semantic_entropy_math_reward": 0.19890873844269663,
152
+ "step": 13
153
+ },
154
+ {
155
+ "completion_length": 765.6265029907227,
156
+ "epoch": 0.07837648705388384,
157
+ "grad_norm": 0.02831243723630905,
158
+ "learning_rate": 1e-06,
159
+ "loss": 0.0,
160
+ "reward": 0.18092759023420513,
161
+ "reward_std": 0.06603722460567951,
162
+ "rewards/semantic_entropy_math_reward": 0.18092759023420513,
163
+ "step": 14
164
+ },
165
+ {
166
+ "completion_length": 758.721736907959,
167
+ "epoch": 0.08397480755773268,
168
+ "grad_norm": 0.03191084787249565,
169
+ "learning_rate": 1e-06,
170
+ "loss": 0.0,
171
+ "reward": 0.18340774439275265,
172
+ "reward_std": 0.07208729872945696,
173
+ "rewards/semantic_entropy_math_reward": 0.18340774439275265,
174
+ "step": 15
175
+ },
176
+ {
177
+ "completion_length": 662.6317081451416,
178
+ "epoch": 0.08957312806158152,
179
+ "grad_norm": 0.03559156879782677,
180
+ "learning_rate": 1e-06,
181
+ "loss": 0.0,
182
+ "reward": 0.26698909467086196,
183
+ "reward_std": 0.10032190917991102,
184
+ "rewards/semantic_entropy_math_reward": 0.26698909467086196,
185
+ "step": 16
186
+ },
187
+ {
188
+ "completion_length": 690.2068557739258,
189
+ "epoch": 0.09517144856543037,
190
+ "grad_norm": 0.040907151997089386,
191
+ "learning_rate": 1e-06,
192
+ "loss": 0.0,
193
+ "reward": 0.22792659618426114,
194
+ "reward_std": 0.07990038132993504,
195
+ "rewards/semantic_entropy_math_reward": 0.22792659618426114,
196
+ "step": 17
197
+ },
198
+ {
199
+ "completion_length": 714.0669784545898,
200
+ "epoch": 0.10076976906927922,
201
+ "grad_norm": 0.07471004873514175,
202
+ "learning_rate": 1e-06,
203
+ "loss": 0.0,
204
+ "reward": 0.21217758627608418,
205
+ "reward_std": 0.09499240922741592,
206
+ "rewards/semantic_entropy_math_reward": 0.21217758627608418,
207
+ "step": 18
208
+ },
209
+ {
210
+ "completion_length": 671.5535850524902,
211
+ "epoch": 0.10636808957312806,
212
+ "grad_norm": 0.07692024111747742,
213
+ "learning_rate": 1e-06,
214
+ "loss": 0.0,
215
+ "reward": 0.21106151002459228,
216
+ "reward_std": 0.09371245314832777,
217
+ "rewards/semantic_entropy_math_reward": 0.21106151002459228,
218
+ "step": 19
219
+ },
220
+ {
221
+ "completion_length": 599.3058185577393,
222
+ "epoch": 0.11196641007697691,
223
+ "grad_norm": 0.09223277866840363,
224
+ "learning_rate": 1e-06,
225
+ "loss": 0.0,
226
+ "reward": 0.2771577457897365,
227
+ "reward_std": 0.10236490913666785,
228
+ "rewards/semantic_entropy_math_reward": 0.2771577457897365,
229
+ "step": 20
230
+ },
231
+ {
232
+ "completion_length": 512.9620628356934,
233
+ "epoch": 0.11756473058082575,
234
+ "grad_norm": 0.13926267623901367,
235
+ "learning_rate": 1e-06,
236
+ "loss": 0.0,
237
+ "reward": 0.34102183766663074,
238
+ "reward_std": 0.1208265540190041,
239
+ "rewards/semantic_entropy_math_reward": 0.34102183766663074,
240
+ "step": 21
241
+ },
242
+ {
243
+ "completion_length": 479.3913764953613,
244
+ "epoch": 0.1231630510846746,
245
+ "grad_norm": 0.3304852247238159,
246
+ "learning_rate": 1e-06,
247
+ "loss": 0.0,
248
+ "reward": 0.2891865149140358,
249
+ "reward_std": 0.11506026284769177,
250
+ "rewards/semantic_entropy_math_reward": 0.2891865149140358,
251
+ "step": 22
252
+ },
253
+ {
254
+ "completion_length": 389.78051376342773,
255
+ "epoch": 0.12876137158852344,
256
+ "grad_norm": 0.6791955232620239,
257
+ "learning_rate": 1e-06,
258
+ "loss": 0.0,
259
+ "reward": 0.3836805671453476,
260
+ "reward_std": 0.12086579436436296,
261
+ "rewards/semantic_entropy_math_reward": 0.3836805671453476,
262
+ "step": 23
263
+ },
264
+ {
265
+ "completion_length": 364.2626552581787,
266
+ "epoch": 0.13435969209237228,
267
+ "grad_norm": 1.3652504682540894,
268
+ "learning_rate": 1e-06,
269
+ "loss": 0.0,
270
+ "reward": 0.32800100184977055,
271
+ "reward_std": 0.11237839260138571,
272
+ "rewards/semantic_entropy_math_reward": 0.32800100184977055,
273
+ "step": 24
274
+ },
275
+ {
276
+ "completion_length": 343.3311061859131,
277
+ "epoch": 0.13995801259622112,
278
+ "grad_norm": 1.9864494800567627,
279
+ "learning_rate": 1e-06,
280
+ "loss": 0.0,
281
+ "reward": 0.27318949438631535,
282
+ "reward_std": 0.10484480275772512,
283
+ "rewards/semantic_entropy_math_reward": 0.27318949438631535,
284
+ "step": 25
285
+ },
286
+ {
287
+ "completion_length": 363.51934814453125,
288
+ "epoch": 0.14555633310007,
289
+ "grad_norm": 5.861116886138916,
290
+ "learning_rate": 1e-06,
291
+ "loss": 0.0,
292
+ "reward": 0.18601191136986017,
293
+ "reward_std": 0.07814609340857714,
294
+ "rewards/semantic_entropy_math_reward": 0.18601191136986017,
295
+ "step": 26
296
+ },
297
+ {
298
+ "completion_length": 637.8519458770752,
299
+ "epoch": 0.15115465360391883,
300
+ "grad_norm": 7.510809898376465,
301
+ "learning_rate": 1e-06,
302
+ "loss": 0.0,
303
+ "reward": 0.07031250384170562,
304
+ "reward_std": 0.03756278438959271,
305
+ "rewards/semantic_entropy_math_reward": 0.07031250384170562,
306
+ "step": 27
307
+ },
308
+ {
309
+ "completion_length": 925.8988342285156,
310
+ "epoch": 0.15675297410776767,
311
+ "grad_norm": 3.905956745147705,
312
+ "learning_rate": 1e-06,
313
+ "loss": 0.0,
314
+ "reward": 0.0363343273056671,
315
+ "reward_std": 0.018063054973026738,
316
+ "rewards/semantic_entropy_math_reward": 0.0363343273056671,
317
+ "step": 28
318
+ },
319
+ {
320
+ "completion_length": 894.2656402587891,
321
+ "epoch": 0.16235129461161651,
322
+ "grad_norm": 3.104583501815796,
323
+ "learning_rate": 1e-06,
324
+ "loss": 0.0,
325
+ "reward": 0.08692956482991576,
326
+ "reward_std": 0.043345644196961075,
327
+ "rewards/semantic_entropy_math_reward": 0.08692956482991576,
328
+ "step": 29
329
+ },
330
+ {
331
+ "completion_length": 774.4866256713867,
332
+ "epoch": 0.16794961511546536,
333
+ "grad_norm": 17.407346725463867,
334
+ "learning_rate": 1e-06,
335
+ "loss": 0.0,
336
+ "reward": 0.13442460726946592,
337
+ "reward_std": 0.07532014499884099,
338
+ "rewards/semantic_entropy_math_reward": 0.13442460726946592,
339
+ "step": 30
340
+ },
341
+ {
342
+ "completion_length": 434.0379524230957,
343
+ "epoch": 0.1735479356193142,
344
+ "grad_norm": 7.19148588180542,
345
+ "learning_rate": 1e-06,
346
+ "loss": 0.0,
347
+ "reward": 0.2876984179019928,
348
+ "reward_std": 0.108892708318308,
349
+ "rewards/semantic_entropy_math_reward": 0.2876984179019928,
350
+ "step": 31
351
+ },
352
+ {
353
+ "completion_length": 405.40477180480957,
354
+ "epoch": 0.17914625612316304,
355
+ "grad_norm": 1.139600157737732,
356
+ "learning_rate": 1e-06,
357
+ "loss": 0.0,
358
+ "reward": 0.34771826211363077,
359
+ "reward_std": 0.13636958738788962,
360
+ "rewards/semantic_entropy_math_reward": 0.34771826211363077,
361
+ "step": 32
362
+ },
363
+ {
364
+ "completion_length": 463.8638458251953,
365
+ "epoch": 0.1847445766270119,
366
+ "grad_norm": 1.6658934354782104,
367
+ "learning_rate": 1e-06,
368
+ "loss": 0.0,
369
+ "reward": 0.35949901584535837,
370
+ "reward_std": 0.12283765664324164,
371
+ "rewards/semantic_entropy_math_reward": 0.35949901584535837,
372
+ "step": 33
373
+ },
374
+ {
375
+ "completion_length": 486.4196491241455,
376
+ "epoch": 0.19034289713086075,
377
+ "grad_norm": 1.7289444208145142,
378
+ "learning_rate": 1e-06,
379
+ "loss": 0.0,
380
+ "reward": 0.3540426651015878,
381
+ "reward_std": 0.10729875811375678,
382
+ "rewards/semantic_entropy_math_reward": 0.3540426651015878,
383
+ "step": 34
384
+ },
385
+ {
386
+ "completion_length": 529.6004600524902,
387
+ "epoch": 0.1959412176347096,
388
+ "grad_norm": 2.8688220977783203,
389
+ "learning_rate": 1e-06,
390
+ "loss": 0.0,
391
+ "reward": 0.3628472303971648,
392
+ "reward_std": 0.11101956386119127,
393
+ "rewards/semantic_entropy_math_reward": 0.3628472303971648,
394
+ "step": 35
395
+ },
396
+ {
397
+ "completion_length": 517.907751083374,
398
+ "epoch": 0.20153953813855843,
399
+ "grad_norm": 2.089806318283081,
400
+ "learning_rate": 1e-06,
401
+ "loss": 0.0,
402
+ "reward": 0.3950892863795161,
403
+ "reward_std": 0.11218473897315562,
404
+ "rewards/semantic_entropy_math_reward": 0.3950892863795161,
405
+ "step": 36
406
+ },
407
+ {
408
+ "completion_length": 558.5573024749756,
409
+ "epoch": 0.20713785864240727,
410
+ "grad_norm": 11.361101150512695,
411
+ "learning_rate": 1e-06,
412
+ "loss": 0.0,
413
+ "reward": 0.3583829468116164,
414
+ "reward_std": 0.11303460272029042,
415
+ "rewards/semantic_entropy_math_reward": 0.3583829468116164,
416
+ "step": 37
417
+ },
418
+ {
419
+ "completion_length": 640.2663822174072,
420
+ "epoch": 0.21273617914625612,
421
+ "grad_norm": 9.671446800231934,
422
+ "learning_rate": 1e-06,
423
+ "loss": 0.0,
424
+ "reward": 0.30580358393490314,
425
+ "reward_std": 0.09917579509783536,
426
+ "rewards/semantic_entropy_math_reward": 0.30580358393490314,
427
+ "step": 38
428
+ },
429
+ {
430
+ "completion_length": 656.6250057220459,
431
+ "epoch": 0.21833449965010496,
432
+ "grad_norm": 4.101109981536865,
433
+ "learning_rate": 1e-06,
434
+ "loss": 0.0,
435
+ "reward": 0.25942460889928043,
436
+ "reward_std": 0.08745632297359407,
437
+ "rewards/semantic_entropy_math_reward": 0.25942460889928043,
438
+ "step": 39
439
+ },
440
+ {
441
+ "completion_length": 694.5468864440918,
442
+ "epoch": 0.22393282015395383,
443
+ "grad_norm": 1.9280970096588135,
444
+ "learning_rate": 1e-06,
445
+ "loss": 0.0,
446
+ "reward": 0.21515377657487988,
447
+ "reward_std": 0.10014541284181178,
448
+ "rewards/semantic_entropy_math_reward": 0.21515377657487988,
449
+ "step": 40
450
+ },
451
+ {
452
+ "completion_length": 682.7931613922119,
453
+ "epoch": 0.22953114065780267,
454
+ "grad_norm": 1.6009750366210938,
455
+ "learning_rate": 1e-06,
456
+ "loss": 0.0,
457
+ "reward": 0.24652778403833508,
458
+ "reward_std": 0.09196118649560958,
459
+ "rewards/semantic_entropy_math_reward": 0.24652778403833508,
460
+ "step": 41
461
+ },
462
+ {
463
+ "completion_length": 691.9077568054199,
464
+ "epoch": 0.2351294611616515,
465
+ "grad_norm": 0.7951090335845947,
466
+ "learning_rate": 1e-06,
467
+ "loss": 0.0,
468
+ "reward": 0.24776786682195961,
469
+ "reward_std": 0.09380459249950945,
470
+ "rewards/semantic_entropy_math_reward": 0.24776786682195961,
471
+ "step": 42
472
+ },
473
+ {
474
+ "completion_length": 655.6860256195068,
475
+ "epoch": 0.24072778166550035,
476
+ "grad_norm": 0.602069079875946,
477
+ "learning_rate": 1e-06,
478
+ "loss": 0.0,
479
+ "reward": 0.25818453216925263,
480
+ "reward_std": 0.08527852385304868,
481
+ "rewards/semantic_entropy_math_reward": 0.25818453216925263,
482
+ "step": 43
483
+ },
484
+ {
485
+ "completion_length": 615.9233703613281,
486
+ "epoch": 0.2463261021693492,
487
+ "grad_norm": 4.384374618530273,
488
+ "learning_rate": 1e-06,
489
+ "loss": 0.0,
490
+ "reward": 0.3162202490493655,
491
+ "reward_std": 0.09647805755957961,
492
+ "rewards/semantic_entropy_math_reward": 0.3162202490493655,
493
+ "step": 44
494
+ },
495
+ {
496
+ "completion_length": 572.6949520111084,
497
+ "epoch": 0.25192442267319803,
498
+ "grad_norm": 3.9872536659240723,
499
+ "learning_rate": 1e-06,
500
+ "loss": 0.0,
501
+ "reward": 0.3339533871039748,
502
+ "reward_std": 0.10575204784981906,
503
+ "rewards/semantic_entropy_math_reward": 0.3339533871039748,
504
+ "step": 45
505
+ },
506
+ {
507
+ "completion_length": 504.4799213409424,
508
+ "epoch": 0.2575227431770469,
509
+ "grad_norm": 0.722204327583313,
510
+ "learning_rate": 1e-06,
511
+ "loss": 0.0,
512
+ "reward": 0.3819444477558136,
513
+ "reward_std": 0.12700667465105653,
514
+ "rewards/semantic_entropy_math_reward": 0.3819444477558136,
515
+ "step": 46
516
+ },
517
+ {
518
+ "completion_length": 444.5267906188965,
519
+ "epoch": 0.2631210636808957,
520
+ "grad_norm": 1.130468487739563,
521
+ "learning_rate": 1e-06,
522
+ "loss": 0.0,
523
+ "reward": 0.37996032927185297,
524
+ "reward_std": 0.11253955401480198,
525
+ "rewards/semantic_entropy_math_reward": 0.37996032927185297,
526
+ "step": 47
527
+ },
528
+ {
529
+ "completion_length": 446.88840103149414,
530
+ "epoch": 0.26871938418474456,
531
+ "grad_norm": 0.16186973452568054,
532
+ "learning_rate": 1e-06,
533
+ "loss": 0.0,
534
+ "reward": 0.38616071827709675,
535
+ "reward_std": 0.12775122001767159,
536
+ "rewards/semantic_entropy_math_reward": 0.38616071827709675,
537
+ "step": 48
538
+ },
539
+ {
540
+ "completion_length": 465.04167556762695,
541
+ "epoch": 0.2743177046885934,
542
+ "grad_norm": 0.16776132583618164,
543
+ "learning_rate": 1e-06,
544
+ "loss": 0.0,
545
+ "reward": 0.33407739456743,
546
+ "reward_std": 0.08897499740123749,
547
+ "rewards/semantic_entropy_math_reward": 0.33407739456743,
548
+ "step": 49
549
+ },
550
+ {
551
+ "completion_length": 447.424861907959,
552
+ "epoch": 0.27991602519244224,
553
+ "grad_norm": 0.3498786687850952,
554
+ "learning_rate": 1e-06,
555
+ "loss": 0.0,
556
+ "reward": 0.3691716268658638,
557
+ "reward_std": 0.11830427148379385,
558
+ "rewards/semantic_entropy_math_reward": 0.3691716268658638,
559
+ "step": 50
560
+ },
561
+ {
562
+ "completion_length": 418.9003047943115,
563
+ "epoch": 0.28551434569629114,
564
+ "grad_norm": 0.4594447612762451,
565
+ "learning_rate": 1e-06,
566
+ "loss": 0.0,
567
+ "reward": 0.36669147573411465,
568
+ "reward_std": 0.11925438744947314,
569
+ "rewards/semantic_entropy_math_reward": 0.36669147573411465,
570
+ "step": 51
571
+ },
572
+ {
573
+ "completion_length": 423.0595302581787,
574
+ "epoch": 0.29111266620014,
575
+ "grad_norm": 1.4399406909942627,
576
+ "learning_rate": 1e-06,
577
+ "loss": 0.0,
578
+ "reward": 0.36941965389996767,
579
+ "reward_std": 0.11310782423242927,
580
+ "rewards/semantic_entropy_math_reward": 0.36941965389996767,
581
+ "step": 52
582
+ },
583
+ {
584
+ "completion_length": 548.479923248291,
585
+ "epoch": 0.2967109867039888,
586
+ "grad_norm": 46.641822814941406,
587
+ "learning_rate": 1e-06,
588
+ "loss": 0.0,
589
+ "reward": 0.36706350184977055,
590
+ "reward_std": 0.10590148437768221,
591
+ "rewards/semantic_entropy_math_reward": 0.36706350184977055,
592
+ "step": 53
593
+ },
594
+ {
595
+ "completion_length": 583.7537307739258,
596
+ "epoch": 0.30230930720783766,
597
+ "grad_norm": 3.891123056411743,
598
+ "learning_rate": 1e-06,
599
+ "loss": 0.0,
600
+ "reward": 0.3219246044754982,
601
+ "reward_std": 0.11164773069322109,
602
+ "rewards/semantic_entropy_math_reward": 0.3219246044754982,
603
+ "step": 54
604
+ },
605
+ {
606
+ "completion_length": 569.5848350524902,
607
+ "epoch": 0.3079076277116865,
608
+ "grad_norm": 5.51741886138916,
609
+ "learning_rate": 1e-06,
610
+ "loss": 0.0,
611
+ "reward": 0.3412698497995734,
612
+ "reward_std": 0.11707511683925986,
613
+ "rewards/semantic_entropy_math_reward": 0.3412698497995734,
614
+ "step": 55
615
+ },
616
+ {
617
+ "completion_length": 589.2455463409424,
618
+ "epoch": 0.31350594821553535,
619
+ "grad_norm": 2.6841273307800293,
620
+ "learning_rate": 1e-06,
621
+ "loss": 0.0,
622
+ "reward": 0.33382937777787447,
623
+ "reward_std": 0.109538255026564,
624
+ "rewards/semantic_entropy_math_reward": 0.33382937777787447,
625
+ "step": 56
626
+ },
627
+ {
628
+ "completion_length": 536.4256038665771,
629
+ "epoch": 0.3191042687193842,
630
+ "grad_norm": 17.737791061401367,
631
+ "learning_rate": 1e-06,
632
+ "loss": 0.0,
633
+ "reward": 0.4088541707023978,
634
+ "reward_std": 0.12808961933478713,
635
+ "rewards/semantic_entropy_math_reward": 0.4088541707023978,
636
+ "step": 57
637
+ },
638
+ {
639
+ "completion_length": 424.6919708251953,
640
+ "epoch": 0.32470258922323303,
641
+ "grad_norm": 1.9888161420822144,
642
+ "learning_rate": 1e-06,
643
+ "loss": 0.0,
644
+ "reward": 0.3665674636140466,
645
+ "reward_std": 0.12649336433969438,
646
+ "rewards/semantic_entropy_math_reward": 0.3665674636140466,
647
+ "step": 58
648
+ },
649
+ {
650
+ "completion_length": 412.2708396911621,
651
+ "epoch": 0.33030090972708187,
652
+ "grad_norm": 0.3387698531150818,
653
+ "learning_rate": 1e-06,
654
+ "loss": 0.0,
655
+ "reward": 0.3368055559694767,
656
+ "reward_std": 0.1261643674224615,
657
+ "rewards/semantic_entropy_math_reward": 0.3368055559694767,
658
+ "step": 59
659
+ },
660
+ {
661
+ "completion_length": 390.2321529388428,
662
+ "epoch": 0.3358992302309307,
663
+ "grad_norm": 0.5467884540557861,
664
+ "learning_rate": 1e-06,
665
+ "loss": 0.0,
666
+ "reward": 0.4055059552192688,
667
+ "reward_std": 0.13236648589372635,
668
+ "rewards/semantic_entropy_math_reward": 0.4055059552192688,
669
+ "step": 60
670
+ },
671
+ {
672
+ "completion_length": 407.4642925262451,
673
+ "epoch": 0.34149755073477955,
674
+ "grad_norm": 0.5977727770805359,
675
+ "learning_rate": 1e-06,
676
+ "loss": 0.0,
677
+ "reward": 0.385416685603559,
678
+ "reward_std": 0.12455174163915217,
679
+ "rewards/semantic_entropy_math_reward": 0.385416685603559,
680
+ "step": 61
681
+ },
682
+ {
683
+ "completion_length": 368.8139953613281,
684
+ "epoch": 0.3470958712386284,
685
+ "grad_norm": 0.7032583355903625,
686
+ "learning_rate": 1e-06,
687
+ "loss": 0.0,
688
+ "reward": 0.3903769990429282,
689
+ "reward_std": 0.14396332437172532,
690
+ "rewards/semantic_entropy_math_reward": 0.3903769990429282,
691
+ "step": 62
692
+ },
693
+ {
694
+ "completion_length": 399.3407802581787,
695
+ "epoch": 0.35269419174247724,
696
+ "grad_norm": 1.6613659858703613,
697
+ "learning_rate": 1e-06,
698
+ "loss": 0.0,
699
+ "reward": 0.3821924654766917,
700
+ "reward_std": 0.13627444114536047,
701
+ "rewards/semantic_entropy_math_reward": 0.3821924654766917,
702
+ "step": 63
703
+ },
704
+ {
705
+ "completion_length": 386.2135524749756,
706
+ "epoch": 0.3582925122463261,
707
+ "grad_norm": 0.297186940908432,
708
+ "learning_rate": 1e-06,
709
+ "loss": 0.0,
710
+ "reward": 0.4055059542879462,
711
+ "reward_std": 0.1174642383120954,
712
+ "rewards/semantic_entropy_math_reward": 0.4055059542879462,
713
+ "step": 64
714
+ },
715
+ {
716
+ "completion_length": 414.6153335571289,
717
+ "epoch": 0.363890832750175,
718
+ "grad_norm": 0.2781192660331726,
719
+ "learning_rate": 1e-06,
720
+ "loss": 0.0,
721
+ "reward": 0.37723215017467737,
722
+ "reward_std": 0.1255739361513406,
723
+ "rewards/semantic_entropy_math_reward": 0.37723215017467737,
724
+ "step": 65
725
+ },
726
+ {
727
+ "completion_length": 402.4501533508301,
728
+ "epoch": 0.3694891532540238,
729
+ "grad_norm": 0.2593112587928772,
730
+ "learning_rate": 1e-06,
731
+ "loss": 0.0,
732
+ "reward": 0.3885168796405196,
733
+ "reward_std": 0.12460076110437512,
734
+ "rewards/semantic_entropy_math_reward": 0.3885168796405196,
735
+ "step": 66
736
+ },
737
+ {
738
+ "completion_length": 417.7031364440918,
739
+ "epoch": 0.37508747375787266,
740
+ "grad_norm": 0.24701371788978577,
741
+ "learning_rate": 1e-06,
742
+ "loss": 0.0,
743
+ "reward": 0.37400794960558414,
744
+ "reward_std": 0.1167642290238291,
745
+ "rewards/semantic_entropy_math_reward": 0.37400794960558414,
746
+ "step": 67
747
+ },
748
+ {
749
+ "completion_length": 397.74777603149414,
750
+ "epoch": 0.3806857942617215,
751
+ "grad_norm": 0.2930716872215271,
752
+ "learning_rate": 1e-06,
753
+ "loss": 0.0,
754
+ "reward": 0.4553571604192257,
755
+ "reward_std": 0.11352779855951667,
756
+ "rewards/semantic_entropy_math_reward": 0.4553571604192257,
757
+ "step": 68
758
+ },
759
+ {
760
+ "completion_length": 415.2507495880127,
761
+ "epoch": 0.38628411476557034,
762
+ "grad_norm": 0.28370365500450134,
763
+ "learning_rate": 1e-06,
764
+ "loss": 0.0,
765
+ "reward": 0.3591269953176379,
766
+ "reward_std": 0.1410878817550838,
767
+ "rewards/semantic_entropy_math_reward": 0.3591269953176379,
768
+ "step": 69
769
+ },
770
+ {
771
+ "completion_length": 415.7284297943115,
772
+ "epoch": 0.3918824352694192,
773
+ "grad_norm": 0.25602635741233826,
774
+ "learning_rate": 1e-06,
775
+ "loss": 0.0,
776
+ "reward": 0.36582342255860567,
777
+ "reward_std": 0.12171732308343053,
778
+ "rewards/semantic_entropy_math_reward": 0.36582342255860567,
779
+ "step": 70
780
+ },
781
+ {
782
+ "completion_length": 399.8206958770752,
783
+ "epoch": 0.397480755773268,
784
+ "grad_norm": 0.2549217641353607,
785
+ "learning_rate": 1e-06,
786
+ "loss": 0.0,
787
+ "reward": 0.462301604449749,
788
+ "reward_std": 0.13242140784859657,
789
+ "rewards/semantic_entropy_math_reward": 0.462301604449749,
790
+ "step": 71
791
+ },
792
+ {
793
+ "completion_length": 405.83185386657715,
794
+ "epoch": 0.40307907627711687,
795
+ "grad_norm": 0.26034626364707947,
796
+ "learning_rate": 1e-06,
797
+ "loss": 0.0,
798
+ "reward": 0.38876488897949457,
799
+ "reward_std": 0.12143889861181378,
800
+ "rewards/semantic_entropy_math_reward": 0.38876488897949457,
801
+ "step": 72
802
+ },
803
+ {
804
+ "completion_length": 389.92634773254395,
805
+ "epoch": 0.4086773967809657,
806
+ "grad_norm": 0.3175303041934967,
807
+ "learning_rate": 1e-06,
808
+ "loss": 0.0,
809
+ "reward": 0.3635912863537669,
810
+ "reward_std": 0.1250197091139853,
811
+ "rewards/semantic_entropy_math_reward": 0.3635912863537669,
812
+ "step": 73
813
+ },
814
+ {
815
+ "completion_length": 405.5067024230957,
816
+ "epoch": 0.41427571728481455,
817
+ "grad_norm": 0.3240727484226227,
818
+ "learning_rate": 1e-06,
819
+ "loss": 0.0,
820
+ "reward": 0.3521825484931469,
821
+ "reward_std": 0.1221736806910485,
822
+ "rewards/semantic_entropy_math_reward": 0.3521825484931469,
823
+ "step": 74
824
+ },
825
+ {
826
+ "completion_length": 395.6436080932617,
827
+ "epoch": 0.4198740377886634,
828
+ "grad_norm": 0.6251773834228516,
829
+ "learning_rate": 1e-06,
830
+ "loss": 0.0,
831
+ "reward": 0.39942957274615765,
832
+ "reward_std": 0.13623498589731753,
833
+ "rewards/semantic_entropy_math_reward": 0.39942957274615765,
834
+ "step": 75
835
+ },
836
+ {
837
+ "completion_length": 390.2894401550293,
838
+ "epoch": 0.42547235829251223,
839
+ "grad_norm": 0.8764235377311707,
840
+ "learning_rate": 1e-06,
841
+ "loss": 0.0,
842
+ "reward": 0.428075410425663,
843
+ "reward_std": 0.12979253917001188,
844
+ "rewards/semantic_entropy_math_reward": 0.428075410425663,
845
+ "step": 76
846
+ },
847
+ {
848
+ "completion_length": 400.01861000061035,
849
+ "epoch": 0.4310706787963611,
850
+ "grad_norm": 0.8677442669868469,
851
+ "learning_rate": 1e-06,
852
+ "loss": 0.0,
853
+ "reward": 0.37165179289877415,
854
+ "reward_std": 0.12289492227137089,
855
+ "rewards/semantic_entropy_math_reward": 0.37165179289877415,
856
+ "step": 77
857
+ },
858
+ {
859
+ "completion_length": 377.8936080932617,
860
+ "epoch": 0.4366689993002099,
861
+ "grad_norm": 0.9437581896781921,
862
+ "learning_rate": 1e-06,
863
+ "loss": 0.0,
864
+ "reward": 0.373139888048172,
865
+ "reward_std": 0.1251775654964149,
866
+ "rewards/semantic_entropy_math_reward": 0.373139888048172,
867
+ "step": 78
868
+ },
869
+ {
870
+ "completion_length": 340.2700939178467,
871
+ "epoch": 0.44226731980405876,
872
+ "grad_norm": 2.5648627281188965,
873
+ "learning_rate": 1e-06,
874
+ "loss": 0.0,
875
+ "reward": 0.3627232192084193,
876
+ "reward_std": 0.15555713046342134,
877
+ "rewards/semantic_entropy_math_reward": 0.3627232192084193,
878
+ "step": 79
879
+ },
880
+ {
881
+ "completion_length": 268.25521659851074,
882
+ "epoch": 0.44786564030790765,
883
+ "grad_norm": 4.964110851287842,
884
+ "learning_rate": 1e-06,
885
+ "loss": 0.0,
886
+ "reward": 0.32589286658912897,
887
+ "reward_std": 0.1293578795157373,
888
+ "rewards/semantic_entropy_math_reward": 0.32589286658912897,
889
+ "step": 80
890
+ },
891
+ {
892
+ "completion_length": 245.03795051574707,
893
+ "epoch": 0.4534639608117565,
894
+ "grad_norm": 2.0234344005584717,
895
+ "learning_rate": 1e-06,
896
+ "loss": 0.0,
897
+ "reward": 0.34858631901443005,
898
+ "reward_std": 0.1360030840151012,
899
+ "rewards/semantic_entropy_math_reward": 0.34858631901443005,
900
+ "step": 81
901
+ },
902
+ {
903
+ "completion_length": 201.16071891784668,
904
+ "epoch": 0.45906228131560534,
905
+ "grad_norm": 10.089159965515137,
906
+ "learning_rate": 1e-06,
907
+ "loss": 0.0,
908
+ "reward": 0.34126985911279917,
909
+ "reward_std": 0.14876779448240995,
910
+ "rewards/semantic_entropy_math_reward": 0.34126985911279917,
911
+ "step": 82
912
+ },
913
+ {
914
+ "completion_length": 163.12500381469727,
915
+ "epoch": 0.4646606018194542,
916
+ "grad_norm": 7.054829120635986,
917
+ "learning_rate": 1e-06,
918
+ "loss": 0.0,
919
+ "reward": 0.33816965017467737,
920
+ "reward_std": 0.14539710199460387,
921
+ "rewards/semantic_entropy_math_reward": 0.33816965017467737,
922
+ "step": 83
923
+ },
924
+ {
925
+ "completion_length": 172.52455711364746,
926
+ "epoch": 0.470258922323303,
927
+ "grad_norm": 3.74849796295166,
928
+ "learning_rate": 1e-06,
929
+ "loss": 0.0,
930
+ "reward": 0.3324652798473835,
931
+ "reward_std": 0.14292793814092875,
932
+ "rewards/semantic_entropy_math_reward": 0.3324652798473835,
933
+ "step": 84
934
+ },
935
+ {
936
+ "completion_length": 190.39881229400635,
937
+ "epoch": 0.47585724282715186,
938
+ "grad_norm": 3.7803595066070557,
939
+ "learning_rate": 1e-06,
940
+ "loss": 0.0,
941
+ "reward": 0.36879961006343365,
942
+ "reward_std": 0.14196715434081852,
943
+ "rewards/semantic_entropy_math_reward": 0.36879961006343365,
944
+ "step": 85
945
+ },
946
+ {
947
+ "completion_length": 202.71949863433838,
948
+ "epoch": 0.4814555633310007,
949
+ "grad_norm": 7.561800479888916,
950
+ "learning_rate": 1e-06,
951
+ "loss": 0.0,
952
+ "reward": 0.3539186557754874,
953
+ "reward_std": 0.13645589677616954,
954
+ "rewards/semantic_entropy_math_reward": 0.3539186557754874,
955
+ "step": 86
956
+ },
957
+ {
958
+ "completion_length": 192.92336750030518,
959
+ "epoch": 0.48705388383484954,
960
+ "grad_norm": 6.106121063232422,
961
+ "learning_rate": 1e-06,
962
+ "loss": 0.0,
963
+ "reward": 0.41753472574055195,
964
+ "reward_std": 0.14698259718716145,
965
+ "rewards/semantic_entropy_math_reward": 0.41753472574055195,
966
+ "step": 87
967
+ },
968
+ {
969
+ "completion_length": 192.75223636627197,
970
+ "epoch": 0.4926522043386984,
971
+ "grad_norm": 7.5285186767578125,
972
+ "learning_rate": 1e-06,
973
+ "loss": 0.0,
974
+ "reward": 0.3844246091321111,
975
+ "reward_std": 0.154494424816221,
976
+ "rewards/semantic_entropy_math_reward": 0.3844246091321111,
977
+ "step": 88
978
+ },
979
+ {
980
+ "completion_length": 196.7128028869629,
981
+ "epoch": 0.4982505248425472,
982
+ "grad_norm": 5.367832183837891,
983
+ "learning_rate": 1e-06,
984
+ "loss": 0.0,
985
+ "reward": 0.40811011102050543,
986
+ "reward_std": 0.14661366492509842,
987
+ "rewards/semantic_entropy_math_reward": 0.40811011102050543,
988
+ "step": 89
989
+ },
990
+ {
991
+ "completion_length": 200.94940757751465,
992
+ "epoch": 0.5038488453463961,
993
+ "grad_norm": 18.42447853088379,
994
+ "learning_rate": 1e-06,
995
+ "loss": 0.0,
996
+ "reward": 0.4272073404863477,
997
+ "reward_std": 0.14780932199209929,
998
+ "rewards/semantic_entropy_math_reward": 0.4272073404863477,
999
+ "step": 90
1000
+ },
1001
+ {
1002
+ "completion_length": 204.03497314453125,
1003
+ "epoch": 0.509447165850245,
1004
+ "grad_norm": 7.817193984985352,
1005
+ "learning_rate": 1e-06,
1006
+ "loss": 0.0,
1007
+ "reward": 0.418278768658638,
1008
+ "reward_std": 0.13225925154983997,
1009
+ "rewards/semantic_entropy_math_reward": 0.418278768658638,
1010
+ "step": 91
1011
+ },
1012
+ {
1013
+ "completion_length": 192.34896183013916,
1014
+ "epoch": 0.5150454863540938,
1015
+ "grad_norm": 6.789738655090332,
1016
+ "learning_rate": 1e-06,
1017
+ "loss": 0.0,
1018
+ "reward": 0.47457837499678135,
1019
+ "reward_std": 0.14893107814714313,
1020
+ "rewards/semantic_entropy_math_reward": 0.47457837499678135,
1021
+ "step": 92
1022
+ },
1023
+ {
1024
+ "completion_length": 210.90179061889648,
1025
+ "epoch": 0.5206438068579426,
1026
+ "grad_norm": 8.528867721557617,
1027
+ "learning_rate": 1e-06,
1028
+ "loss": 0.0,
1029
+ "reward": 0.41455854661762714,
1030
+ "reward_std": 0.1263797995634377,
1031
+ "rewards/semantic_entropy_math_reward": 0.41455854661762714,
1032
+ "step": 93
1033
+ },
1034
+ {
1035
+ "completion_length": 222.63169956207275,
1036
+ "epoch": 0.5262421273617914,
1037
+ "grad_norm": 10.166954040527344,
1038
+ "learning_rate": 1e-06,
1039
+ "loss": 0.0,
1040
+ "reward": 0.41530259046703577,
1041
+ "reward_std": 0.1387968505732715,
1042
+ "rewards/semantic_entropy_math_reward": 0.41530259046703577,
1043
+ "step": 94
1044
+ },
1045
+ {
1046
+ "completion_length": 208.9508981704712,
1047
+ "epoch": 0.5318404478656403,
1048
+ "grad_norm": 8.634490966796875,
1049
+ "learning_rate": 1e-06,
1050
+ "loss": 0.0,
1051
+ "reward": 0.45324901584535837,
1052
+ "reward_std": 0.1519172815605998,
1053
+ "rewards/semantic_entropy_math_reward": 0.45324901584535837,
1054
+ "step": 95
1055
+ },
1056
+ {
1057
+ "completion_length": 240.5915231704712,
1058
+ "epoch": 0.5374387683694891,
1059
+ "grad_norm": 15.801514625549316,
1060
+ "learning_rate": 1e-06,
1061
+ "loss": 0.0,
1062
+ "reward": 0.4201389057561755,
1063
+ "reward_std": 0.15532761020585895,
1064
+ "rewards/semantic_entropy_math_reward": 0.4201389057561755,
1065
+ "step": 96
1066
+ },
1067
+ {
1068
+ "completion_length": 288.0282793045044,
1069
+ "epoch": 0.543037088873338,
1070
+ "grad_norm": 7.69626522064209,
1071
+ "learning_rate": 1e-06,
1072
+ "loss": 0.0,
1073
+ "reward": 0.49342758767306805,
1074
+ "reward_std": 0.1336272112093866,
1075
+ "rewards/semantic_entropy_math_reward": 0.49342758767306805,
1076
+ "step": 97
1077
+ },
1078
+ {
1079
+ "completion_length": 570.5766410827637,
1080
+ "epoch": 0.5486354093771868,
1081
+ "grad_norm": 9.325577735900879,
1082
+ "learning_rate": 1e-06,
1083
+ "loss": 0.0,
1084
+ "reward": 0.4603174738585949,
1085
+ "reward_std": 0.14188312110491097,
1086
+ "rewards/semantic_entropy_math_reward": 0.4603174738585949,
1087
+ "step": 98
1088
+ },
1089
+ {
1090
+ "completion_length": 924.8459968566895,
1091
+ "epoch": 0.5542337298810357,
1092
+ "grad_norm": 4.683976650238037,
1093
+ "learning_rate": 1e-06,
1094
+ "loss": 0.0,
1095
+ "reward": 0.4341518022119999,
1096
+ "reward_std": 0.15863031102344394,
1097
+ "rewards/semantic_entropy_math_reward": 0.4341518022119999,
1098
+ "step": 99
1099
+ },
1100
+ {
1101
+ "completion_length": 975.7284393310547,
1102
+ "epoch": 0.5598320503848845,
1103
+ "grad_norm": 6.508670806884766,
1104
+ "learning_rate": 1e-06,
1105
+ "loss": 0.0,
1106
+ "reward": 0.43588790111243725,
1107
+ "reward_std": 0.1634892332367599,
1108
+ "rewards/semantic_entropy_math_reward": 0.43588790111243725,
1109
+ "step": 100
1110
+ },
1111
+ {
1112
+ "completion_length": 979.6651954650879,
1113
+ "epoch": 0.5654303708887334,
1114
+ "grad_norm": 6.556097507476807,
1115
+ "learning_rate": 1e-06,
1116
+ "loss": 0.0,
1117
+ "reward": 0.4521329505369067,
1118
+ "reward_std": 0.16528919152915478,
1119
+ "rewards/semantic_entropy_math_reward": 0.4521329505369067,
1120
+ "step": 101
1121
+ },
1122
+ {
1123
+ "completion_length": 966.6339378356934,
1124
+ "epoch": 0.5710286913925823,
1125
+ "grad_norm": 1.9696649312973022,
1126
+ "learning_rate": 1e-06,
1127
+ "loss": 0.0,
1128
+ "reward": 0.45213295705616474,
1129
+ "reward_std": 0.1434005326591432,
1130
+ "rewards/semantic_entropy_math_reward": 0.45213295705616474,
1131
+ "step": 102
1132
+ },
1133
+ {
1134
+ "completion_length": 933.5625114440918,
1135
+ "epoch": 0.5766270118964311,
1136
+ "grad_norm": 2.5271127223968506,
1137
+ "learning_rate": 1e-06,
1138
+ "loss": 0.0,
1139
+ "reward": 0.4157986231148243,
1140
+ "reward_std": 0.15396162681281567,
1141
+ "rewards/semantic_entropy_math_reward": 0.4157986231148243,
1142
+ "step": 103
1143
+ },
1144
+ {
1145
+ "completion_length": 609.2589454650879,
1146
+ "epoch": 0.58222533240028,
1147
+ "grad_norm": 2.110628366470337,
1148
+ "learning_rate": 1e-06,
1149
+ "loss": 0.0,
1150
+ "reward": 0.5197172742336988,
1151
+ "reward_std": 0.1446234486065805,
1152
+ "rewards/semantic_entropy_math_reward": 0.5197172742336988,
1153
+ "step": 104
1154
+ },
1155
+ {
1156
+ "completion_length": 299.1971788406372,
1157
+ "epoch": 0.5878236529041287,
1158
+ "grad_norm": 1.4388288259506226,
1159
+ "learning_rate": 1e-06,
1160
+ "loss": 0.0,
1161
+ "reward": 0.49330358020961285,
1162
+ "reward_std": 0.15013255970552564,
1163
+ "rewards/semantic_entropy_math_reward": 0.49330358020961285,
1164
+ "step": 105
1165
+ },
1166
+ {
1167
+ "completion_length": 281.92931747436523,
1168
+ "epoch": 0.5934219734079776,
1169
+ "grad_norm": 1.1156909465789795,
1170
+ "learning_rate": 1e-06,
1171
+ "loss": 0.0,
1172
+ "reward": 0.5328621100634336,
1173
+ "reward_std": 0.16049590334296227,
1174
+ "rewards/semantic_entropy_math_reward": 0.5328621100634336,
1175
+ "step": 106
1176
+ },
1177
+ {
1178
+ "completion_length": 277.87053871154785,
1179
+ "epoch": 0.5990202939118264,
1180
+ "grad_norm": 1.5050427913665771,
1181
+ "learning_rate": 1e-06,
1182
+ "loss": 0.0,
1183
+ "reward": 0.4991319552063942,
1184
+ "reward_std": 0.13887666864320636,
1185
+ "rewards/semantic_entropy_math_reward": 0.4991319552063942,
1186
+ "step": 107
1187
+ },
1188
+ {
1189
+ "completion_length": 244.03050994873047,
1190
+ "epoch": 0.6046186144156753,
1191
+ "grad_norm": 2.5947532653808594,
1192
+ "learning_rate": 1e-06,
1193
+ "loss": 0.0,
1194
+ "reward": 0.5357143003493547,
1195
+ "reward_std": 0.15269030537456274,
1196
+ "rewards/semantic_entropy_math_reward": 0.5357143003493547,
1197
+ "step": 108
1198
+ },
1199
+ {
1200
+ "completion_length": 137.79092693328857,
1201
+ "epoch": 0.6102169349195241,
1202
+ "grad_norm": 4.043277263641357,
1203
+ "learning_rate": 1e-06,
1204
+ "loss": 0.0,
1205
+ "reward": 0.5419146958738565,
1206
+ "reward_std": 0.1557740089483559,
1207
+ "rewards/semantic_entropy_math_reward": 0.5419146958738565,
1208
+ "step": 109
1209
+ },
1210
+ {
1211
+ "completion_length": 107.52009153366089,
1212
+ "epoch": 0.615815255423373,
1213
+ "grad_norm": 1.8183155059814453,
1214
+ "learning_rate": 1e-06,
1215
+ "loss": 0.0,
1216
+ "reward": 0.5765129253268242,
1217
+ "reward_std": 0.12702399701811373,
1218
+ "rewards/semantic_entropy_math_reward": 0.5765129253268242,
1219
+ "step": 110
1220
+ },
1221
+ {
1222
+ "completion_length": 96.88541841506958,
1223
+ "epoch": 0.6214135759272218,
1224
+ "grad_norm": 2.091888904571533,
1225
+ "learning_rate": 1e-06,
1226
+ "loss": 0.0,
1227
+ "reward": 0.6276041902601719,
1228
+ "reward_std": 0.13468103110790253,
1229
+ "rewards/semantic_entropy_math_reward": 0.6276041902601719,
1230
+ "step": 111
1231
+ },
1232
+ {
1233
+ "completion_length": 138.0691990852356,
1234
+ "epoch": 0.6270118964310707,
1235
+ "grad_norm": 1.2132967710494995,
1236
+ "learning_rate": 1e-06,
1237
+ "loss": 0.0,
1238
+ "reward": 0.5658482331782579,
1239
+ "reward_std": 0.14125833846628666,
1240
+ "rewards/semantic_entropy_math_reward": 0.5658482331782579,
1241
+ "step": 112
1242
+ },
1243
+ {
1244
+ "completion_length": 168.62128067016602,
1245
+ "epoch": 0.6326102169349195,
1246
+ "grad_norm": 1.3657702207565308,
1247
+ "learning_rate": 1e-06,
1248
+ "loss": 0.0,
1249
+ "reward": 0.5846974309533834,
1250
+ "reward_std": 0.12009491049684584,
1251
+ "rewards/semantic_entropy_math_reward": 0.5846974309533834,
1252
+ "step": 113
1253
+ },
1254
+ {
1255
+ "completion_length": 172.63616180419922,
1256
+ "epoch": 0.6382085374387684,
1257
+ "grad_norm": 4.615123748779297,
1258
+ "learning_rate": 1e-06,
1259
+ "loss": 0.0,
1260
+ "reward": 0.6726190745830536,
1261
+ "reward_std": 0.14088664995506406,
1262
+ "rewards/semantic_entropy_math_reward": 0.6726190745830536,
1263
+ "step": 114
1264
+ },
1265
+ {
1266
+ "completion_length": 184.50372314453125,
1267
+ "epoch": 0.6438068579426172,
1268
+ "grad_norm": 7.566853046417236,
1269
+ "learning_rate": 1e-06,
1270
+ "loss": 0.0,
1271
+ "reward": 0.623387910425663,
1272
+ "reward_std": 0.1348249651491642,
1273
+ "rewards/semantic_entropy_math_reward": 0.623387910425663,
1274
+ "step": 115
1275
+ },
1276
+ {
1277
+ "completion_length": 268.1763458251953,
1278
+ "epoch": 0.6494051784464661,
1279
+ "grad_norm": 3.787036657333374,
1280
+ "learning_rate": 1e-06,
1281
+ "loss": 0.0,
1282
+ "reward": 0.5989583469927311,
1283
+ "reward_std": 0.14310514647513628,
1284
+ "rewards/semantic_entropy_math_reward": 0.5989583469927311,
1285
+ "step": 116
1286
+ },
1287
+ {
1288
+ "completion_length": 424.9270887374878,
1289
+ "epoch": 0.655003498950315,
1290
+ "grad_norm": 1.769083857536316,
1291
+ "learning_rate": 1e-06,
1292
+ "loss": 0.0,
1293
+ "reward": 0.6443452537059784,
1294
+ "reward_std": 0.1348385107703507,
1295
+ "rewards/semantic_entropy_math_reward": 0.6443452537059784,
1296
+ "step": 117
1297
+ },
1298
+ {
1299
+ "completion_length": 627.7061138153076,
1300
+ "epoch": 0.6606018194541637,
1301
+ "grad_norm": 7.572482109069824,
1302
+ "learning_rate": 1e-06,
1303
+ "loss": 0.0,
1304
+ "reward": 0.6216518022119999,
1305
+ "reward_std": 0.12173205520957708,
1306
+ "rewards/semantic_entropy_math_reward": 0.6216518022119999,
1307
+ "step": 118
1308
+ },
1309
+ {
1310
+ "completion_length": 597.2857189178467,
1311
+ "epoch": 0.6662001399580126,
1312
+ "grad_norm": 4.5395965576171875,
1313
+ "learning_rate": 1e-06,
1314
+ "loss": 0.0,
1315
+ "reward": 0.6326885130256414,
1316
+ "reward_std": 0.11796635785140097,
1317
+ "rewards/semantic_entropy_math_reward": 0.6326885130256414,
1318
+ "step": 119
1319
+ },
1320
+ {
1321
+ "completion_length": 499.34674072265625,
1322
+ "epoch": 0.6717984604618614,
1323
+ "grad_norm": 9.828434944152832,
1324
+ "learning_rate": 1e-06,
1325
+ "loss": 0.0,
1326
+ "reward": 0.6607142873108387,
1327
+ "reward_std": 0.12943885754793882,
1328
+ "rewards/semantic_entropy_math_reward": 0.6607142873108387,
1329
+ "step": 120
1330
+ },
1331
+ {
1332
+ "completion_length": 416.2641429901123,
1333
+ "epoch": 0.6773967809657103,
1334
+ "grad_norm": 4.24314022064209,
1335
+ "learning_rate": 1e-06,
1336
+ "loss": 0.0,
1337
+ "reward": 0.7173859365284443,
1338
+ "reward_std": 0.10959133435972035,
1339
+ "rewards/semantic_entropy_math_reward": 0.7173859365284443,
1340
+ "step": 121
1341
+ },
1342
+ {
1343
+ "completion_length": 133.405508518219,
1344
+ "epoch": 0.6829951014695591,
1345
+ "grad_norm": 5.226346969604492,
1346
+ "learning_rate": 1e-06,
1347
+ "loss": 0.0,
1348
+ "reward": 0.6529017835855484,
1349
+ "reward_std": 0.13599514495581388,
1350
+ "rewards/semantic_entropy_math_reward": 0.6529017835855484,
1351
+ "step": 122
1352
+ },
1353
+ {
1354
+ "completion_length": 108.45610427856445,
1355
+ "epoch": 0.688593421973408,
1356
+ "grad_norm": 4.747099876403809,
1357
+ "learning_rate": 1e-06,
1358
+ "loss": 0.0,
1359
+ "reward": 0.6951884962618351,
1360
+ "reward_std": 0.1218845120165497,
1361
+ "rewards/semantic_entropy_math_reward": 0.6951884962618351,
1362
+ "step": 123
1363
+ },
1364
+ {
1365
+ "completion_length": 116.35714483261108,
1366
+ "epoch": 0.6941917424772568,
1367
+ "grad_norm": 5.495078086853027,
1368
+ "learning_rate": 1e-06,
1369
+ "loss": 0.0,
1370
+ "reward": 0.7322668917477131,
1371
+ "reward_std": 0.10345165804028511,
1372
+ "rewards/semantic_entropy_math_reward": 0.7322668917477131,
1373
+ "step": 124
1374
+ },
1375
+ {
1376
+ "completion_length": 190.98735427856445,
1377
+ "epoch": 0.6997900629811057,
1378
+ "grad_norm": 4.758305072784424,
1379
+ "learning_rate": 1e-06,
1380
+ "loss": 0.0,
1381
+ "reward": 0.6990327574312687,
1382
+ "reward_std": 0.1072727048303932,
1383
+ "rewards/semantic_entropy_math_reward": 0.6990327574312687,
1384
+ "step": 125
1385
+ },
1386
+ {
1387
+ "completion_length": 154.2269356250763,
1388
+ "epoch": 0.7053883834849545,
1389
+ "grad_norm": 4.895815372467041,
1390
+ "learning_rate": 1e-06,
1391
+ "loss": 0.0,
1392
+ "reward": 0.7566964458674192,
1393
+ "reward_std": 0.10861217533238232,
1394
+ "rewards/semantic_entropy_math_reward": 0.7566964458674192,
1395
+ "step": 126
1396
+ },
1397
+ {
1398
+ "completion_length": 219.0520896911621,
1399
+ "epoch": 0.7109867039888034,
1400
+ "grad_norm": 5.073260307312012,
1401
+ "learning_rate": 1e-06,
1402
+ "loss": 0.0,
1403
+ "reward": 0.7007688824087381,
1404
+ "reward_std": 0.10281824320554733,
1405
+ "rewards/semantic_entropy_math_reward": 0.7007688824087381,
1406
+ "step": 127
1407
+ },
1408
+ {
1409
+ "completion_length": 150.2425627708435,
1410
+ "epoch": 0.7165850244926522,
1411
+ "grad_norm": 6.930428981781006,
1412
+ "learning_rate": 1e-06,
1413
+ "loss": 0.0,
1414
+ "reward": 0.7514881230890751,
1415
+ "reward_std": 0.11211708444170654,
1416
+ "rewards/semantic_entropy_math_reward": 0.7514881230890751,
1417
+ "step": 128
1418
+ },
1419
+ {
1420
+ "completion_length": 150.25967574119568,
1421
+ "epoch": 0.722183344996501,
1422
+ "grad_norm": 4.981149196624756,
1423
+ "learning_rate": 1e-06,
1424
+ "loss": 0.0,
1425
+ "reward": 0.7196180820465088,
1426
+ "reward_std": 0.1129911975003779,
1427
+ "rewards/semantic_entropy_math_reward": 0.7196180820465088,
1428
+ "step": 129
1429
+ },
1430
+ {
1431
+ "completion_length": 129.3750023841858,
1432
+ "epoch": 0.72778166550035,
1433
+ "grad_norm": 5.079310417175293,
1434
+ "learning_rate": 1e-06,
1435
+ "loss": 0.0,
1436
+ "reward": 0.7373512163758278,
1437
+ "reward_std": 0.10283095168415457,
1438
+ "rewards/semantic_entropy_math_reward": 0.7373512163758278,
1439
+ "step": 130
1440
+ },
1441
+ {
1442
+ "completion_length": 134.2485146522522,
1443
+ "epoch": 0.7333799860041987,
1444
+ "grad_norm": 5.917436599731445,
1445
+ "learning_rate": 1e-06,
1446
+ "loss": 0.0,
1447
+ "reward": 0.7232142984867096,
1448
+ "reward_std": 0.1203189252410084,
1449
+ "rewards/semantic_entropy_math_reward": 0.7232142984867096,
1450
+ "step": 131
1451
+ },
1452
+ {
1453
+ "completion_length": 107.0967288017273,
1454
+ "epoch": 0.7389783065080476,
1455
+ "grad_norm": 8.360037803649902,
1456
+ "learning_rate": 1e-06,
1457
+ "loss": 0.0,
1458
+ "reward": 0.7357390988618135,
1459
+ "reward_std": 0.10952205723151565,
1460
+ "rewards/semantic_entropy_math_reward": 0.7357390988618135,
1461
+ "step": 132
1462
+ },
1463
+ {
1464
+ "completion_length": 128.16592502593994,
1465
+ "epoch": 0.7445766270118964,
1466
+ "grad_norm": 7.823781490325928,
1467
+ "learning_rate": 1e-06,
1468
+ "loss": 0.0,
1469
+ "reward": 0.7218502275645733,
1470
+ "reward_std": 0.11286869831383228,
1471
+ "rewards/semantic_entropy_math_reward": 0.7218502275645733,
1472
+ "step": 133
1473
+ },
1474
+ {
1475
+ "completion_length": 106.69270944595337,
1476
+ "epoch": 0.7501749475157453,
1477
+ "grad_norm": 7.141363143920898,
1478
+ "learning_rate": 1e-06,
1479
+ "loss": 0.0,
1480
+ "reward": 0.7478918954730034,
1481
+ "reward_std": 0.10708213225007057,
1482
+ "rewards/semantic_entropy_math_reward": 0.7478918954730034,
1483
+ "step": 134
1484
+ },
1485
+ {
1486
+ "completion_length": 93.68006038665771,
1487
+ "epoch": 0.7557732680195941,
1488
+ "grad_norm": 5.968332767486572,
1489
+ "learning_rate": 1e-06,
1490
+ "loss": 0.0,
1491
+ "reward": 0.7960069701075554,
1492
+ "reward_std": 0.08689452323596925,
1493
+ "rewards/semantic_entropy_math_reward": 0.7960069701075554,
1494
+ "step": 135
1495
+ },
1496
+ {
1497
+ "completion_length": 90.8816967010498,
1498
+ "epoch": 0.761371588523443,
1499
+ "grad_norm": 4.925920486450195,
1500
+ "learning_rate": 1e-06,
1501
+ "loss": 0.0,
1502
+ "reward": 0.7625248245894909,
1503
+ "reward_std": 0.10826884070411325,
1504
+ "rewards/semantic_entropy_math_reward": 0.7625248245894909,
1505
+ "step": 136
1506
+ },
1507
+ {
1508
+ "completion_length": 106.96056652069092,
1509
+ "epoch": 0.7669699090272918,
1510
+ "grad_norm": 6.21066427230835,
1511
+ "learning_rate": 1e-06,
1512
+ "loss": 0.0,
1513
+ "reward": 0.7124256156384945,
1514
+ "reward_std": 0.12395848939195275,
1515
+ "rewards/semantic_entropy_math_reward": 0.7124256156384945,
1516
+ "step": 137
1517
+ },
1518
+ {
1519
+ "completion_length": 113.4546160697937,
1520
+ "epoch": 0.7725682295311407,
1521
+ "grad_norm": 4.43303108215332,
1522
+ "learning_rate": 1e-06,
1523
+ "loss": 0.0,
1524
+ "reward": 0.7115575522184372,
1525
+ "reward_std": 0.12078050547279418,
1526
+ "rewards/semantic_entropy_math_reward": 0.7115575522184372,
1527
+ "step": 138
1528
+ },
1529
+ {
1530
+ "completion_length": 140.69047927856445,
1531
+ "epoch": 0.7781665500349895,
1532
+ "grad_norm": 3.2451171875,
1533
+ "learning_rate": 1e-06,
1534
+ "loss": 0.0,
1535
+ "reward": 0.694320447742939,
1536
+ "reward_std": 0.1306492048315704,
1537
+ "rewards/semantic_entropy_math_reward": 0.694320447742939,
1538
+ "step": 139
1539
+ },
1540
+ {
1541
+ "completion_length": 142.95833444595337,
1542
+ "epoch": 0.7837648705388384,
1543
+ "grad_norm": 6.136082649230957,
1544
+ "learning_rate": 1e-06,
1545
+ "loss": 0.0,
1546
+ "reward": 0.7182539887726307,
1547
+ "reward_std": 0.15217532915994525,
1548
+ "rewards/semantic_entropy_math_reward": 0.7182539887726307,
1549
+ "step": 140
1550
+ },
1551
+ {
1552
+ "completion_length": 142.6287226676941,
1553
+ "epoch": 0.7893631910426872,
1554
+ "grad_norm": 2.892977476119995,
1555
+ "learning_rate": 1e-06,
1556
+ "loss": 0.0,
1557
+ "reward": 0.7118055783212185,
1558
+ "reward_std": 0.13839181000366807,
1559
+ "rewards/semantic_entropy_math_reward": 0.7118055783212185,
1560
+ "step": 141
1561
+ },
1562
+ {
1563
+ "completion_length": 157.30431842803955,
1564
+ "epoch": 0.794961511546536,
1565
+ "grad_norm": 4.3441619873046875,
1566
+ "learning_rate": 1e-06,
1567
+ "loss": 0.0,
1568
+ "reward": 0.6943204514682293,
1569
+ "reward_std": 0.14929893938824534,
1570
+ "rewards/semantic_entropy_math_reward": 0.6943204514682293,
1571
+ "step": 142
1572
+ },
1573
+ {
1574
+ "completion_length": 168.61533069610596,
1575
+ "epoch": 0.8005598320503848,
1576
+ "grad_norm": 1.7150957584381104,
1577
+ "learning_rate": 1e-06,
1578
+ "loss": 0.0,
1579
+ "reward": 0.686507947742939,
1580
+ "reward_std": 0.1514957039617002,
1581
+ "rewards/semantic_entropy_math_reward": 0.686507947742939,
1582
+ "step": 143
1583
+ },
1584
+ {
1585
+ "completion_length": 162.0543179512024,
1586
+ "epoch": 0.8061581525542337,
1587
+ "grad_norm": 1.2334315776824951,
1588
+ "learning_rate": 1e-06,
1589
+ "loss": 0.0,
1590
+ "reward": 0.6346726343035698,
1591
+ "reward_std": 0.14308730140328407,
1592
+ "rewards/semantic_entropy_math_reward": 0.6346726343035698,
1593
+ "step": 144
1594
+ },
1595
+ {
1596
+ "completion_length": 265.3921184539795,
1597
+ "epoch": 0.8117564730580826,
1598
+ "grad_norm": 2.8437411785125732,
1599
+ "learning_rate": 1e-06,
1600
+ "loss": 0.0,
1601
+ "reward": 0.5422867126762867,
1602
+ "reward_std": 0.17818555515259504,
1603
+ "rewards/semantic_entropy_math_reward": 0.5422867126762867,
1604
+ "step": 145
1605
+ },
1606
+ {
1607
+ "completion_length": 358.7314052581787,
1608
+ "epoch": 0.8173547935619314,
1609
+ "grad_norm": 1.9524171352386475,
1610
+ "learning_rate": 1e-06,
1611
+ "loss": 0.0,
1612
+ "reward": 0.4378720261156559,
1613
+ "reward_std": 0.194162187166512,
1614
+ "rewards/semantic_entropy_math_reward": 0.4378720261156559,
1615
+ "step": 146
1616
+ },
1617
+ {
1618
+ "completion_length": 268.26190662384033,
1619
+ "epoch": 0.8229531140657803,
1620
+ "grad_norm": 0.9712325930595398,
1621
+ "learning_rate": 1e-06,
1622
+ "loss": 0.0,
1623
+ "reward": 0.5739087481051683,
1624
+ "reward_std": 0.19231022708117962,
1625
+ "rewards/semantic_entropy_math_reward": 0.5739087481051683,
1626
+ "step": 147
1627
+ },
1628
+ {
1629
+ "completion_length": 247.8772373199463,
1630
+ "epoch": 0.8285514345696291,
1631
+ "grad_norm": 0.29546335339546204,
1632
+ "learning_rate": 1e-06,
1633
+ "loss": 0.0,
1634
+ "reward": 0.5668402947485447,
1635
+ "reward_std": 0.14728267351165414,
1636
+ "rewards/semantic_entropy_math_reward": 0.5668402947485447,
1637
+ "step": 148
1638
+ },
1639
+ {
1640
+ "completion_length": 286.6562547683716,
1641
+ "epoch": 0.834149755073478,
1642
+ "grad_norm": 0.19167956709861755,
1643
+ "learning_rate": 1e-06,
1644
+ "loss": 0.0,
1645
+ "reward": 0.5652281939983368,
1646
+ "reward_std": 0.16519550560042262,
1647
+ "rewards/semantic_entropy_math_reward": 0.5652281939983368,
1648
+ "step": 149
1649
+ },
1650
+ {
1651
+ "completion_length": 299.7745580673218,
1652
+ "epoch": 0.8397480755773268,
1653
+ "grad_norm": 0.16220510005950928,
1654
+ "learning_rate": 1e-06,
1655
+ "loss": 0.0,
1656
+ "reward": 0.563988110050559,
1657
+ "reward_std": 0.16206924617290497,
1658
+ "rewards/semantic_entropy_math_reward": 0.563988110050559,
1659
+ "step": 150
1660
+ },
1661
+ {
1662
+ "completion_length": 323.96875381469727,
1663
+ "epoch": 0.8453463960811757,
1664
+ "grad_norm": 0.11583292484283447,
1665
+ "learning_rate": 1e-06,
1666
+ "loss": 0.0,
1667
+ "reward": 0.5203373022377491,
1668
+ "reward_std": 0.17140463134273887,
1669
+ "rewards/semantic_entropy_math_reward": 0.5203373022377491,
1670
+ "step": 151
1671
+ },
1672
+ {
1673
+ "completion_length": 333.31994819641113,
1674
+ "epoch": 0.8509447165850245,
1675
+ "grad_norm": 0.09664779901504517,
1676
+ "learning_rate": 1e-06,
1677
+ "loss": 0.0,
1678
+ "reward": 0.503100199624896,
1679
+ "reward_std": 0.17030652752146125,
1680
+ "rewards/semantic_entropy_math_reward": 0.503100199624896,
1681
+ "step": 152
1682
+ },
1683
+ {
1684
+ "completion_length": 319.05804443359375,
1685
+ "epoch": 0.8565430370888734,
1686
+ "grad_norm": 0.08762703090906143,
1687
+ "learning_rate": 1e-06,
1688
+ "loss": 0.0,
1689
+ "reward": 0.5343502033501863,
1690
+ "reward_std": 0.1655228640884161,
1691
+ "rewards/semantic_entropy_math_reward": 0.5343502033501863,
1692
+ "step": 153
1693
+ },
1694
+ {
1695
+ "completion_length": 346.2261962890625,
1696
+ "epoch": 0.8621413575927221,
1697
+ "grad_norm": 0.07872086763381958,
1698
+ "learning_rate": 1e-06,
1699
+ "loss": 0.0,
1700
+ "reward": 0.5121527835726738,
1701
+ "reward_std": 0.15845369640737772,
1702
+ "rewards/semantic_entropy_math_reward": 0.5121527835726738,
1703
+ "step": 154
1704
+ },
1705
+ {
1706
+ "completion_length": 357.5372085571289,
1707
+ "epoch": 0.867739678096571,
1708
+ "grad_norm": 0.0757875069975853,
1709
+ "learning_rate": 1e-06,
1710
+ "loss": 0.0,
1711
+ "reward": 0.5648561716079712,
1712
+ "reward_std": 0.1613363642245531,
1713
+ "rewards/semantic_entropy_math_reward": 0.5648561716079712,
1714
+ "step": 155
1715
+ },
1716
+ {
1717
+ "completion_length": 353.3415222167969,
1718
+ "epoch": 0.8733379986004198,
1719
+ "grad_norm": 0.09012839198112488,
1720
+ "learning_rate": 1e-06,
1721
+ "loss": 0.0,
1722
+ "reward": 0.49231152422726154,
1723
+ "reward_std": 0.1589321019127965,
1724
+ "rewards/semantic_entropy_math_reward": 0.49231152422726154,
1725
+ "step": 156
1726
+ },
1727
+ {
1728
+ "completion_length": 354.72173500061035,
1729
+ "epoch": 0.8789363191042687,
1730
+ "grad_norm": 0.24965420365333557,
1731
+ "learning_rate": 1e-06,
1732
+ "loss": 0.0,
1733
+ "reward": 0.5529513861984015,
1734
+ "reward_std": 0.16521730786189437,
1735
+ "rewards/semantic_entropy_math_reward": 0.5529513861984015,
1736
+ "step": 157
1737
+ },
1738
+ {
1739
+ "completion_length": 348.0320014953613,
1740
+ "epoch": 0.8845346396081175,
1741
+ "grad_norm": 0.06840746104717255,
1742
+ "learning_rate": 1e-06,
1743
+ "loss": 0.0,
1744
+ "reward": 0.5613839365541935,
1745
+ "reward_std": 0.1481709172949195,
1746
+ "rewards/semantic_entropy_math_reward": 0.5613839365541935,
1747
+ "step": 158
1748
+ },
1749
+ {
1750
+ "completion_length": 350.35715103149414,
1751
+ "epoch": 0.8901329601119664,
1752
+ "grad_norm": 0.13703550398349762,
1753
+ "learning_rate": 1e-06,
1754
+ "loss": 0.0,
1755
+ "reward": 0.5834573470056057,
1756
+ "reward_std": 0.164536252617836,
1757
+ "rewards/semantic_entropy_math_reward": 0.5834573470056057,
1758
+ "step": 159
1759
+ },
1760
+ {
1761
+ "completion_length": 358.2440528869629,
1762
+ "epoch": 0.8957312806158153,
1763
+ "grad_norm": 0.09526026993989944,
1764
+ "learning_rate": 1e-06,
1765
+ "loss": 0.0,
1766
+ "reward": 0.5683283749967813,
1767
+ "reward_std": 0.14862215495668352,
1768
+ "rewards/semantic_entropy_math_reward": 0.5683283749967813,
1769
+ "step": 160
1770
+ },
1771
+ {
1772
+ "completion_length": 374.41518211364746,
1773
+ "epoch": 0.9013296011196641,
1774
+ "grad_norm": 0.09629662334918976,
1775
+ "learning_rate": 1e-06,
1776
+ "loss": 0.0,
1777
+ "reward": 0.5731646921485662,
1778
+ "reward_std": 0.15710427099838853,
1779
+ "rewards/semantic_entropy_math_reward": 0.5731646921485662,
1780
+ "step": 161
1781
+ },
1782
+ {
1783
+ "completion_length": 359.19643211364746,
1784
+ "epoch": 0.906927921623513,
1785
+ "grad_norm": 0.10354594886302948,
1786
+ "learning_rate": 1e-06,
1787
+ "loss": 0.0,
1788
+ "reward": 0.5668402947485447,
1789
+ "reward_std": 0.1442330675199628,
1790
+ "rewards/semantic_entropy_math_reward": 0.5668402947485447,
1791
+ "step": 162
1792
+ },
1793
+ {
1794
+ "completion_length": 363.5342330932617,
1795
+ "epoch": 0.9125262421273618,
1796
+ "grad_norm": 0.09924127161502838,
1797
+ "learning_rate": 1e-06,
1798
+ "loss": 0.0,
1799
+ "reward": 0.6071428768336773,
1800
+ "reward_std": 0.15955356042832136,
1801
+ "rewards/semantic_entropy_math_reward": 0.6071428768336773,
1802
+ "step": 163
1803
+ },
1804
+ {
1805
+ "completion_length": 344.59301376342773,
1806
+ "epoch": 0.9181245626312107,
1807
+ "grad_norm": 0.10809105634689331,
1808
+ "learning_rate": 1e-06,
1809
+ "loss": 0.0,
1810
+ "reward": 0.6684027947485447,
1811
+ "reward_std": 0.14918948477134109,
1812
+ "rewards/semantic_entropy_math_reward": 0.6684027947485447,
1813
+ "step": 164
1814
+ },
1815
+ {
1816
+ "completion_length": 361.6019401550293,
1817
+ "epoch": 0.9237228831350595,
1818
+ "grad_norm": 0.08925337344408035,
1819
+ "learning_rate": 1e-06,
1820
+ "loss": 0.0,
1821
+ "reward": 0.713541679084301,
1822
+ "reward_std": 0.14462757343426347,
1823
+ "rewards/semantic_entropy_math_reward": 0.713541679084301,
1824
+ "step": 165
1825
+ },
1826
+ {
1827
+ "completion_length": 358.32738876342773,
1828
+ "epoch": 0.9293212036389084,
1829
+ "grad_norm": 0.10002636164426804,
1830
+ "learning_rate": 1e-06,
1831
+ "loss": 0.0,
1832
+ "reward": 0.8019593358039856,
1833
+ "reward_std": 0.101166230160743,
1834
+ "rewards/semantic_entropy_math_reward": 0.8019593358039856,
1835
+ "step": 166
1836
+ },
1837
+ {
1838
+ "completion_length": 349.5200939178467,
1839
+ "epoch": 0.9349195241427571,
1840
+ "grad_norm": 0.0706295296549797,
1841
+ "learning_rate": 1e-06,
1842
+ "loss": 0.0,
1843
+ "reward": 0.8885168805718422,
1844
+ "reward_std": 0.0957920125219971,
1845
+ "rewards/semantic_entropy_math_reward": 0.8885168805718422,
1846
+ "step": 167
1847
+ },
1848
+ {
1849
+ "completion_length": 357.4538764953613,
1850
+ "epoch": 0.940517844646606,
1851
+ "grad_norm": 0.04306298866868019,
1852
+ "learning_rate": 1e-06,
1853
+ "loss": 0.0,
1854
+ "reward": 0.9391121119260788,
1855
+ "reward_std": 0.04490397102199495,
1856
+ "rewards/semantic_entropy_math_reward": 0.9391121119260788,
1857
+ "step": 168
1858
+ },
1859
+ {
1860
+ "completion_length": 344.33408546447754,
1861
+ "epoch": 0.9461161651504548,
1862
+ "grad_norm": 0.05653183534741402,
1863
+ "learning_rate": 1e-06,
1864
+ "loss": 0.0,
1865
+ "reward": 0.9515129216015339,
1866
+ "reward_std": 0.03180735826026648,
1867
+ "rewards/semantic_entropy_math_reward": 0.9515129216015339,
1868
+ "step": 169
1869
+ },
1870
+ {
1871
+ "completion_length": 344.59078216552734,
1872
+ "epoch": 0.9517144856543037,
1873
+ "grad_norm": 0.033216774463653564,
1874
+ "learning_rate": 1e-06,
1875
+ "loss": 0.0,
1876
+ "reward": 0.9871031977236271,
1877
+ "reward_std": 0.008495657471939921,
1878
+ "rewards/semantic_entropy_math_reward": 0.9871031977236271,
1879
+ "step": 170
1880
+ },
1881
+ {
1882
+ "completion_length": 346.92560386657715,
1883
+ "epoch": 0.9573128061581525,
1884
+ "grad_norm": 0.0,
1885
+ "learning_rate": 1e-06,
1886
+ "loss": 0.0,
1887
+ "reward": 1.0,
1888
+ "reward_std": 0.0,
1889
+ "rewards/semantic_entropy_math_reward": 1.0,
1890
+ "step": 171
1891
+ },
1892
+ {
1893
+ "completion_length": 357.2983684539795,
1894
+ "epoch": 0.9629111266620014,
1895
+ "grad_norm": 0.001862065983004868,
1896
+ "learning_rate": 1e-06,
1897
+ "loss": 0.0,
1898
+ "reward": 0.9986359179019928,
1899
+ "reward_std": 0.0021478806156665087,
1900
+ "rewards/semantic_entropy_math_reward": 0.9986359179019928,
1901
+ "step": 172
1902
+ },
1903
+ {
1904
+ "completion_length": 338.4322986602783,
1905
+ "epoch": 0.9685094471658502,
1906
+ "grad_norm": 0.0,
1907
+ "learning_rate": 1e-06,
1908
+ "loss": 0.0,
1909
+ "reward": 1.0,
1910
+ "reward_std": 0.0,
1911
+ "rewards/semantic_entropy_math_reward": 1.0,
1912
+ "step": 173
1913
+ },
1914
+ {
1915
+ "completion_length": 340.3586368560791,
1916
+ "epoch": 0.9741077676696991,
1917
+ "grad_norm": 0.0,
1918
+ "learning_rate": 1e-06,
1919
+ "loss": 0.0,
1920
+ "reward": 1.0,
1921
+ "reward_std": 0.0,
1922
+ "rewards/semantic_entropy_math_reward": 1.0,
1923
+ "step": 174
1924
+ },
1925
+ {
1926
+ "completion_length": 351.07218170166016,
1927
+ "epoch": 0.979706088173548,
1928
+ "grad_norm": 0.005613424815237522,
1929
+ "learning_rate": 1e-06,
1930
+ "loss": 0.0,
1931
+ "reward": 0.9986359179019928,
1932
+ "reward_std": 0.0021478806156665087,
1933
+ "rewards/semantic_entropy_math_reward": 0.9986359179019928,
1934
+ "step": 175
1935
+ },
1936
+ {
1937
+ "completion_length": 346.41890716552734,
1938
+ "epoch": 0.9853044086773968,
1939
+ "grad_norm": 0.0,
1940
+ "learning_rate": 1e-06,
1941
+ "loss": 0.0,
1942
+ "reward": 1.0,
1943
+ "reward_std": 0.0,
1944
+ "rewards/semantic_entropy_math_reward": 1.0,
1945
+ "step": 176
1946
+ },
1947
+ {
1948
+ "completion_length": 352.03422927856445,
1949
+ "epoch": 0.9909027291812457,
1950
+ "grad_norm": 0.0,
1951
+ "learning_rate": 1e-06,
1952
+ "loss": 0.0,
1953
+ "reward": 1.0,
1954
+ "reward_std": 0.0,
1955
+ "rewards/semantic_entropy_math_reward": 1.0,
1956
+ "step": 177
1957
+ },
1958
+ {
1959
+ "completion_length": 361.8452491760254,
1960
+ "epoch": 0.9965010496850945,
1961
+ "grad_norm": 0.008886351250112057,
1962
+ "learning_rate": 1e-06,
1963
+ "loss": 0.0,
1964
+ "reward": 0.9972718358039856,
1965
+ "reward_std": 0.004295761231333017,
1966
+ "rewards/semantic_entropy_math_reward": 0.9972718358039856,
1967
+ "step": 178
1968
+ },
1969
+ {
1970
+ "epoch": 0.9965010496850945,
1971
+ "step": 178,
1972
+ "total_flos": 0.0,
1973
+ "train_loss": 0.0,
1974
+ "train_runtime": 5.6459,
1975
+ "train_samples_per_second": 3542.367,
1976
+ "train_steps_per_second": 31.527
1977
+ }
1978
+ ],
1979
+ "logging_steps": 1,
1980
+ "max_steps": 178,
1981
+ "num_input_tokens_seen": 0,
1982
+ "num_train_epochs": 1,
1983
+ "save_steps": 10,
1984
+ "stateful_callbacks": {
1985
+ "TrainerControl": {
1986
+ "args": {
1987
+ "should_epoch_stop": false,
1988
+ "should_evaluate": false,
1989
+ "should_log": false,
1990
+ "should_save": true,
1991
+ "should_training_stop": true
1992
+ },
1993
+ "attributes": {}
1994
+ }
1995
+ },
1996
+ "total_flos": 0.0,
1997
+ "train_batch_size": 1,
1998
+ "trial_name": null,
1999
+ "trial_params": null
2000
+ }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:abf7a07781912b3d1855e1a3426e4bc8a18f69e765bd5e4fddc8295558bcf7e5
3
  size 7544
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cbc2f1fcd93416b00074f789a754dbedee2c540619a96b86f592a68e1434238
3
  size 7544
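
The log history diffed above is the standard Transformers trainer-state format (presumably this repo's trainer_state.json, given the `log_history`, `total_flos`, and `stateful_callbacks` fields). A quick way to inspect it is to plot the logged semantic-entropy reward against the training step. The sketch below is illustrative only: it assumes the file has been downloaded locally, the `checkpoint-178/trainer_state.json` path is a placeholder rather than a path guaranteed by this repository, and it uses plain `json` plus `matplotlib`.

```python
# Minimal sketch: plot the semantic-entropy reward logged during the 178 GRPO steps.
# Assumes the trainer state shown in the diff above has been saved locally as
# trainer_state.json; the checkpoint path below is a placeholder.
import json

import matplotlib.pyplot as plt

with open("checkpoint-178/trainer_state.json") as f:
    state = json.load(f)

# The final summary entry only carries runtime totals, so keep entries that log a reward.
logs = [entry for entry in state["log_history"] if "reward" in entry]
steps = [entry["step"] for entry in logs]
rewards = [entry["rewards/semantic_entropy_math_reward"] for entry in logs]

plt.plot(steps, rewards)
plt.xlabel("step")
plt.ylabel("semantic_entropy_math_reward")
plt.title("Reward over GRPO training")
plt.savefig("reward_curve.png")
```

The same pattern works for any other logged metric (e.g. `completion_length` or `reward_std`); only the key pulled from each entry changes.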