li-muyang commited on
Commit
de4a3e9
·
verified ·
1 Parent(s): 36b92c8

Model save

Browse files
README.md ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ tags:
4
+ - trl
5
+ - dpo
6
+ - generated_from_trainer
7
+ model-index:
8
+ - name: zephyr-8b-dpo-full
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # zephyr-8b-dpo-full
16
+
17
+ This model is a DPO fine-tuned version of data/zephyr-8b-sft-full/checkpoint-1000 on an unknown dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 0.5131
20
+ - Rewards/chosen: -0.6905
21
+ - Rewards/rejected: -1.5128
22
+ - Rewards/accuracies: 0.7659
23
+ - Rewards/margins: 0.8223
24
+ - Logps/rejected: -432.5991
25
+ - Logps/chosen: -374.1649
26
+ - Logits/rejected: 0.2624
27
+ - Logits/chosen: -0.0136
28
+
29
+ ## Model description
30
+
31
+ More information needed
32
+
33
+ ## Intended uses & limitations
34
+
35
+ More information needed
36
+
37
+ ## Training and evaluation data
38
+
39
+ More information needed
40
+
41
+ ## Training procedure
42
+
43
+ ### Training hyperparameters
44
+
45
+ The following hyperparameters were used during training:
46
+ - learning_rate: 5e-07
47
+ - train_batch_size: 4
48
+ - eval_batch_size: 4
49
+ - seed: 42
50
+ - distributed_type: multi-GPU
51
+ - num_devices: 8
52
+ - gradient_accumulation_steps: 2
53
+ - total_train_batch_size: 64
54
+ - total_eval_batch_size: 32
55
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
56
+ - lr_scheduler_type: cosine
57
+ - lr_scheduler_warmup_ratio: 0.1
58
+ - num_epochs: 1
59
+
60
+ ### Training results
61
+
62
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
63
+ |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
64
+ | 0.6766 | 0.1047 | 100 | 0.6765 | 0.0370 | 0.0004 | 0.7103 | 0.0366 | -281.2796 | -301.4119 | -0.6479 | -0.7839 |
65
+ | 0.6043 | 0.2093 | 200 | 0.5948 | -0.4434 | -0.7461 | 0.7262 | 0.3027 | -355.9276 | -349.4549 | -0.2261 | -0.3967 |
66
+ | 0.56 | 0.3140 | 300 | 0.5452 | -0.7062 | -1.2683 | 0.7222 | 0.5621 | -408.1486 | -375.7269 | -0.0802 | -0.2757 |
67
+ | 0.5508 | 0.4186 | 400 | 0.5345 | -0.5684 | -1.2522 | 0.7421 | 0.6837 | -406.5327 | -361.9530 | -0.0946 | -0.2906 |
68
+ | 0.5182 | 0.5233 | 500 | 0.5286 | -0.9677 | -1.6478 | 0.7262 | 0.6801 | -446.0949 | -401.8783 | 0.2622 | 0.0104 |
69
+ | 0.4936 | 0.6279 | 600 | 0.5255 | -0.5650 | -1.3792 | 0.7778 | 0.8143 | -419.2403 | -361.6075 | 0.1520 | -0.1091 |
70
+ | 0.4967 | 0.7326 | 700 | 0.5153 | -0.5682 | -1.3802 | 0.7698 | 0.8121 | -419.3397 | -361.9271 | 0.1898 | -0.0745 |
71
+ | 0.5013 | 0.8373 | 800 | 0.5137 | -0.6747 | -1.4796 | 0.7659 | 0.8049 | -429.2723 | -372.5779 | 0.2384 | -0.0307 |
72
+ | 0.4983 | 0.9419 | 900 | 0.5131 | -0.6905 | -1.5128 | 0.7659 | 0.8223 | -432.5991 | -374.1649 | 0.2624 | -0.0136 |
73
+
74
+
75
+ ### Framework versions
76
+
77
+ - Transformers 4.45.2
78
+ - Pytorch 2.2.2+rocm5.7
79
+ - Datasets 3.2.0
80
+ - Tokenizers 0.20.3
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9994767137624281,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.5504645167845081,
5
+ "train_runtime": 19044.9678,
6
+ "train_samples": 61134,
7
+ "train_samples_per_second": 3.21,
8
+ "train_steps_per_second": 0.05
9
+ }
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "data/zephyr-8b-sft-full/checkpoint-1100",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
 
1
  {
2
+ "_name_or_path": "data/zephyr-8b-sft-full/checkpoint-1000",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
generation_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 128000,
4
+ "do_sample": true,
5
+ "eos_token_id": 128001,
6
+ "temperature": 0.6,
7
+ "top_p": 0.9,
8
+ "transformers_version": "4.45.2"
9
+ }
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0d395950b1807f823c9511e80261b53f408797893a844468d33b475567a511c4
3
  size 4976698672
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39b832847c8f68222e58779a9d20be821bd29622ecf0e3f1c8c64372d7ed98ef
3
  size 4976698672
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a19d45dee4a68b3bea5890adcb6e275922b86c460e2fd77fb5852d89fb607c9f
3
  size 4999802720
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f67c4355da2348a95f9ad045da82a74f34466a28ee0fea26baa8ced84234fd66
3
  size 4999802720
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:98cf6b69f5afeb21806038669d295488743dc3528fac2e6a80b40251bcb4f8e8
3
  size 4915916176
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f45dde4c599b65fba248826e93795e0f68c5ee102ffb90d909a2c47f52ed518
3
  size 4915916176
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7fcd0d252f2d632eef8802b67a78362607f77df957e70b31032d8c97c6dd1422
3
  size 1168138808
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0afba220958a77d77843c0178cd2789a2d9318a78356e2bb886fb867352160b1
3
  size 1168138808
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9994767137624281,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.5504645167845081,
5
+ "train_runtime": 19044.9678,
6
+ "train_samples": 61134,
7
+ "train_samples_per_second": 3.21,
8
+ "train_steps_per_second": 0.05
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,1626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9994767137624281,
5
+ "eval_steps": 100,
6
+ "global_step": 955,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0010465724751439038,
13
+ "grad_norm": 5.451860785513885,
14
+ "learning_rate": 5.208333333333333e-09,
15
+ "logits/chosen": -1.0643162727355957,
16
+ "logits/rejected": -0.861487865447998,
17
+ "logps/chosen": -457.7091064453125,
18
+ "logps/rejected": -371.2662658691406,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/rejected": 0.0,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.010465724751439037,
28
+ "grad_norm": 4.7663255162271385,
29
+ "learning_rate": 5.208333333333333e-08,
30
+ "logits/chosen": -0.7333142161369324,
31
+ "logits/rejected": -0.7134076356887817,
32
+ "logps/chosen": -301.405517578125,
33
+ "logps/rejected": -279.814697265625,
34
+ "loss": 0.6936,
35
+ "rewards/accuracies": 0.4166666567325592,
36
+ "rewards/chosen": -0.0006206975085660815,
37
+ "rewards/margins": -0.0014212429523468018,
38
+ "rewards/rejected": 0.0008005454437807202,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.020931449502878074,
43
+ "grad_norm": 5.112414364557664,
44
+ "learning_rate": 1.0416666666666667e-07,
45
+ "logits/chosen": -0.7887303233146667,
46
+ "logits/rejected": -0.6987150311470032,
47
+ "logps/chosen": -320.1136779785156,
48
+ "logps/rejected": -308.3643798828125,
49
+ "loss": 0.6929,
50
+ "rewards/accuracies": 0.550000011920929,
51
+ "rewards/chosen": 0.0004412093257997185,
52
+ "rewards/margins": 0.001130643649958074,
53
+ "rewards/rejected": -0.000689434411469847,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.03139717425431711,
58
+ "grad_norm": 4.99364758669278,
59
+ "learning_rate": 1.5624999999999999e-07,
60
+ "logits/chosen": -0.8625659942626953,
61
+ "logits/rejected": -0.7310890555381775,
62
+ "logps/chosen": -304.8927307128906,
63
+ "logps/rejected": -282.91070556640625,
64
+ "loss": 0.6931,
65
+ "rewards/accuracies": 0.5375000238418579,
66
+ "rewards/chosen": 0.00022493649157695472,
67
+ "rewards/margins": 0.0005593249225057662,
68
+ "rewards/rejected": -0.00033438848913647234,
69
+ "step": 30
70
+ },
71
+ {
72
+ "epoch": 0.04186289900575615,
73
+ "grad_norm": 5.223938734516328,
74
+ "learning_rate": 2.0833333333333333e-07,
75
+ "logits/chosen": -0.8336359858512878,
76
+ "logits/rejected": -0.7138947248458862,
77
+ "logps/chosen": -346.45550537109375,
78
+ "logps/rejected": -302.9964904785156,
79
+ "loss": 0.6926,
80
+ "rewards/accuracies": 0.5,
81
+ "rewards/chosen": 0.0026509915478527546,
82
+ "rewards/margins": 0.001462576794438064,
83
+ "rewards/rejected": 0.0011884148698300123,
84
+ "step": 40
85
+ },
86
+ {
87
+ "epoch": 0.052328623757195186,
88
+ "grad_norm": 4.722375406184354,
89
+ "learning_rate": 2.604166666666667e-07,
90
+ "logits/chosen": -0.7795863151550293,
91
+ "logits/rejected": -0.6525732278823853,
92
+ "logps/chosen": -257.31060791015625,
93
+ "logps/rejected": -221.70748901367188,
94
+ "loss": 0.6922,
95
+ "rewards/accuracies": 0.6625000238418579,
96
+ "rewards/chosen": 0.0048319087363779545,
97
+ "rewards/margins": 0.0019413300324231386,
98
+ "rewards/rejected": 0.002890578703954816,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.06279434850863422,
103
+ "grad_norm": 5.075388680101497,
104
+ "learning_rate": 3.1249999999999997e-07,
105
+ "logits/chosen": -0.8222079277038574,
106
+ "logits/rejected": -0.7359899282455444,
107
+ "logps/chosen": -345.0970153808594,
108
+ "logps/rejected": -300.33770751953125,
109
+ "loss": 0.6911,
110
+ "rewards/accuracies": 0.612500011920929,
111
+ "rewards/chosen": 0.011030884459614754,
112
+ "rewards/margins": 0.004350438714027405,
113
+ "rewards/rejected": 0.006680445279926062,
114
+ "step": 60
115
+ },
116
+ {
117
+ "epoch": 0.07326007326007326,
118
+ "grad_norm": 5.046679615665253,
119
+ "learning_rate": 3.645833333333333e-07,
120
+ "logits/chosen": -0.6597806811332703,
121
+ "logits/rejected": -0.6361770629882812,
122
+ "logps/chosen": -260.9971618652344,
123
+ "logps/rejected": -286.46697998046875,
124
+ "loss": 0.6894,
125
+ "rewards/accuracies": 0.6625000238418579,
126
+ "rewards/chosen": 0.013305026106536388,
127
+ "rewards/margins": 0.007203704211860895,
128
+ "rewards/rejected": 0.0061013223603367805,
129
+ "step": 70
130
+ },
131
+ {
132
+ "epoch": 0.0837257980115123,
133
+ "grad_norm": 4.97721377364173,
134
+ "learning_rate": 4.1666666666666667e-07,
135
+ "logits/chosen": -0.758665144443512,
136
+ "logits/rejected": -0.598028302192688,
137
+ "logps/chosen": -276.90338134765625,
138
+ "logps/rejected": -261.9596862792969,
139
+ "loss": 0.6862,
140
+ "rewards/accuracies": 0.6625000238418579,
141
+ "rewards/chosen": 0.022657250985503197,
142
+ "rewards/margins": 0.012633631005883217,
143
+ "rewards/rejected": 0.010023623704910278,
144
+ "step": 80
145
+ },
146
+ {
147
+ "epoch": 0.09419152276295134,
148
+ "grad_norm": 4.157558262116518,
149
+ "learning_rate": 4.6874999999999996e-07,
150
+ "logits/chosen": -0.6880342364311218,
151
+ "logits/rejected": -0.6242440342903137,
152
+ "logps/chosen": -298.48590087890625,
153
+ "logps/rejected": -276.1928405761719,
154
+ "loss": 0.684,
155
+ "rewards/accuracies": 0.6499999761581421,
156
+ "rewards/chosen": 0.026639845222234726,
157
+ "rewards/margins": 0.013844798319041729,
158
+ "rewards/rejected": 0.012795047834515572,
159
+ "step": 90
160
+ },
161
+ {
162
+ "epoch": 0.10465724751439037,
163
+ "grad_norm": 4.859970034492583,
164
+ "learning_rate": 4.999732492681437e-07,
165
+ "logits/chosen": -0.7400780916213989,
166
+ "logits/rejected": -0.5942438840866089,
167
+ "logps/chosen": -298.5497131347656,
168
+ "logps/rejected": -291.4864196777344,
169
+ "loss": 0.6766,
170
+ "rewards/accuracies": 0.7124999761581421,
171
+ "rewards/chosen": 0.036215804517269135,
172
+ "rewards/margins": 0.034676693379879,
173
+ "rewards/rejected": 0.001539108925499022,
174
+ "step": 100
175
+ },
176
+ {
177
+ "epoch": 0.10465724751439037,
178
+ "eval_logits/chosen": -0.7839179039001465,
179
+ "eval_logits/rejected": -0.6478830575942993,
180
+ "eval_logps/chosen": -301.41192626953125,
181
+ "eval_logps/rejected": -281.27960205078125,
182
+ "eval_loss": 0.676456093788147,
183
+ "eval_rewards/accuracies": 0.7103174328804016,
184
+ "eval_rewards/chosen": 0.036981064826250076,
185
+ "eval_rewards/margins": 0.03660787642002106,
186
+ "eval_rewards/rejected": 0.0003731876495294273,
187
+ "eval_runtime": 229.5969,
188
+ "eval_samples_per_second": 8.711,
189
+ "eval_steps_per_second": 0.274,
190
+ "step": 100
191
+ },
192
+ {
193
+ "epoch": 0.1151229722658294,
194
+ "grad_norm": 5.0565000677565495,
195
+ "learning_rate": 4.996723692767926e-07,
196
+ "logits/chosen": -0.7310239672660828,
197
+ "logits/rejected": -0.6270194053649902,
198
+ "logps/chosen": -276.677490234375,
199
+ "logps/rejected": -242.0689239501953,
200
+ "loss": 0.6743,
201
+ "rewards/accuracies": 0.675000011920929,
202
+ "rewards/chosen": 0.04216315224766731,
203
+ "rewards/margins": 0.0578165240585804,
204
+ "rewards/rejected": -0.015653371810913086,
205
+ "step": 110
206
+ },
207
+ {
208
+ "epoch": 0.12558869701726844,
209
+ "grad_norm": 5.236113971991537,
210
+ "learning_rate": 4.990375746213598e-07,
211
+ "logits/chosen": -0.7704837322235107,
212
+ "logits/rejected": -0.7006691098213196,
213
+ "logps/chosen": -253.9123992919922,
214
+ "logps/rejected": -279.79742431640625,
215
+ "loss": 0.6605,
216
+ "rewards/accuracies": 0.762499988079071,
217
+ "rewards/chosen": 0.021229851990938187,
218
+ "rewards/margins": 0.06761517375707626,
219
+ "rewards/rejected": -0.04638532176613808,
220
+ "step": 120
221
+ },
222
+ {
223
+ "epoch": 0.1360544217687075,
224
+ "grad_norm": 5.848718385521063,
225
+ "learning_rate": 4.980697142834314e-07,
226
+ "logits/chosen": -0.7273018956184387,
227
+ "logits/rejected": -0.6581666469573975,
228
+ "logps/chosen": -309.36895751953125,
229
+ "logps/rejected": -300.0992126464844,
230
+ "loss": 0.6491,
231
+ "rewards/accuracies": 0.7124999761581421,
232
+ "rewards/chosen": -0.043165743350982666,
233
+ "rewards/margins": 0.11292536556720734,
234
+ "rewards/rejected": -0.1560911238193512,
235
+ "step": 130
236
+ },
237
+ {
238
+ "epoch": 0.14652014652014653,
239
+ "grad_norm": 7.55656728427718,
240
+ "learning_rate": 4.967700826904229e-07,
241
+ "logits/chosen": -0.5881147384643555,
242
+ "logits/rejected": -0.5260018110275269,
243
+ "logps/chosen": -323.1036071777344,
244
+ "logps/rejected": -325.988037109375,
245
+ "loss": 0.6403,
246
+ "rewards/accuracies": 0.7124999761581421,
247
+ "rewards/chosen": -0.07382384687662125,
248
+ "rewards/margins": 0.09867590665817261,
249
+ "rewards/rejected": -0.17249977588653564,
250
+ "step": 140
251
+ },
252
+ {
253
+ "epoch": 0.15698587127158556,
254
+ "grad_norm": 5.993400229281012,
255
+ "learning_rate": 4.951404179843962e-07,
256
+ "logits/chosen": -0.8269667625427246,
257
+ "logits/rejected": -0.6397506594657898,
258
+ "logps/chosen": -338.58087158203125,
259
+ "logps/rejected": -282.53472900390625,
260
+ "loss": 0.6354,
261
+ "rewards/accuracies": 0.75,
262
+ "rewards/chosen": -0.023351237177848816,
263
+ "rewards/margins": 0.14395873248577118,
264
+ "rewards/rejected": -0.1673099845647812,
265
+ "step": 150
266
+ },
267
+ {
268
+ "epoch": 0.1674515960230246,
269
+ "grad_norm": 6.52774166199853,
270
+ "learning_rate": 4.931828996974498e-07,
271
+ "logits/chosen": -0.7941682934761047,
272
+ "logits/rejected": -0.5335707068443298,
273
+ "logps/chosen": -351.42047119140625,
274
+ "logps/rejected": -295.7675476074219,
275
+ "loss": 0.6148,
276
+ "rewards/accuracies": 0.7875000238418579,
277
+ "rewards/chosen": -0.02929142490029335,
278
+ "rewards/margins": 0.2560710310935974,
279
+ "rewards/rejected": -0.28536245226860046,
280
+ "step": 160
281
+ },
282
+ {
283
+ "epoch": 0.17791732077446362,
284
+ "grad_norm": 7.419772604550085,
285
+ "learning_rate": 4.909001458367866e-07,
286
+ "logits/chosen": -0.6290043592453003,
287
+ "logits/rejected": -0.50290447473526,
288
+ "logps/chosen": -297.24652099609375,
289
+ "logps/rejected": -316.2005615234375,
290
+ "loss": 0.6028,
291
+ "rewards/accuracies": 0.737500011920929,
292
+ "rewards/chosen": -0.14729979634284973,
293
+ "rewards/margins": 0.2713707983493805,
294
+ "rewards/rejected": -0.4186705946922302,
295
+ "step": 170
296
+ },
297
+ {
298
+ "epoch": 0.18838304552590268,
299
+ "grad_norm": 8.219451889017629,
300
+ "learning_rate": 4.882952093833627e-07,
301
+ "logits/chosen": -0.695152759552002,
302
+ "logits/rejected": -0.6777328848838806,
303
+ "logps/chosen": -266.3822937011719,
304
+ "logps/rejected": -305.38055419921875,
305
+ "loss": 0.5993,
306
+ "rewards/accuracies": 0.699999988079071,
307
+ "rewards/chosen": -0.1685328185558319,
308
+ "rewards/margins": 0.23227596282958984,
309
+ "rewards/rejected": -0.40080875158309937,
310
+ "step": 180
311
+ },
312
+ {
313
+ "epoch": 0.1988487702773417,
314
+ "grad_norm": 14.401160470357306,
315
+ "learning_rate": 4.853715742087946e-07,
316
+ "logits/chosen": -0.5633553266525269,
317
+ "logits/rejected": -0.4553346037864685,
318
+ "logps/chosen": -299.26141357421875,
319
+ "logps/rejected": -383.54583740234375,
320
+ "loss": 0.5981,
321
+ "rewards/accuracies": 0.800000011920929,
322
+ "rewards/chosen": -0.2618769407272339,
323
+ "rewards/margins": 0.3539055287837982,
324
+ "rewards/rejected": -0.6157824397087097,
325
+ "step": 190
326
+ },
327
+ {
328
+ "epoch": 0.20931449502878074,
329
+ "grad_norm": 9.77874906688374,
330
+ "learning_rate": 4.821331504159906e-07,
331
+ "logits/chosen": -0.6467507481575012,
332
+ "logits/rejected": -0.34418243169784546,
333
+ "logps/chosen": -381.84051513671875,
334
+ "logps/rejected": -324.20941162109375,
335
+ "loss": 0.6043,
336
+ "rewards/accuracies": 0.7124999761581421,
337
+ "rewards/chosen": -0.30156201124191284,
338
+ "rewards/margins": 0.31678324937820435,
339
+ "rewards/rejected": -0.6183452606201172,
340
+ "step": 200
341
+ },
342
+ {
343
+ "epoch": 0.20931449502878074,
344
+ "eval_logits/chosen": -0.3967326581478119,
345
+ "eval_logits/rejected": -0.22607146203517914,
346
+ "eval_logps/chosen": -349.45489501953125,
347
+ "eval_logps/rejected": -355.9275817871094,
348
+ "eval_loss": 0.5947719812393188,
349
+ "eval_rewards/accuracies": 0.726190447807312,
350
+ "eval_rewards/chosen": -0.44344887137413025,
351
+ "eval_rewards/margins": 0.302657812833786,
352
+ "eval_rewards/rejected": -0.7461066246032715,
353
+ "eval_runtime": 229.0804,
354
+ "eval_samples_per_second": 8.731,
355
+ "eval_steps_per_second": 0.275,
356
+ "step": 200
357
+ },
358
+ {
359
+ "epoch": 0.21978021978021978,
360
+ "grad_norm": 10.798066517209035,
361
+ "learning_rate": 4.785842691097342e-07,
362
+ "logits/chosen": -0.3766190707683563,
363
+ "logits/rejected": -0.259346067905426,
364
+ "logps/chosen": -337.4864196777344,
365
+ "logps/rejected": -380.99993896484375,
366
+ "loss": 0.5964,
367
+ "rewards/accuracies": 0.625,
368
+ "rewards/chosen": -0.39307016134262085,
369
+ "rewards/margins": 0.23509669303894043,
370
+ "rewards/rejected": -0.6281668543815613,
371
+ "step": 210
372
+ },
373
+ {
374
+ "epoch": 0.2302459445316588,
375
+ "grad_norm": 8.703797087912555,
376
+ "learning_rate": 4.7472967660421603e-07,
377
+ "logits/chosen": -0.5316035151481628,
378
+ "logits/rejected": -0.3284838795661926,
379
+ "logps/chosen": -331.7840270996094,
380
+ "logps/rejected": -323.8965759277344,
381
+ "loss": 0.5891,
382
+ "rewards/accuracies": 0.699999988079071,
383
+ "rewards/chosen": -0.2757546603679657,
384
+ "rewards/margins": 0.4368220865726471,
385
+ "rewards/rejected": -0.7125767469406128,
386
+ "step": 220
387
+ },
388
+ {
389
+ "epoch": 0.24071166928309787,
390
+ "grad_norm": 10.116764545045047,
391
+ "learning_rate": 4.705745280752585e-07,
392
+ "logits/chosen": -0.4472675323486328,
393
+ "logits/rejected": -0.36385267972946167,
394
+ "logps/chosen": -318.5218200683594,
395
+ "logps/rejected": -355.2652893066406,
396
+ "loss": 0.5789,
397
+ "rewards/accuracies": 0.675000011920929,
398
+ "rewards/chosen": -0.3962463438510895,
399
+ "rewards/margins": 0.3614889681339264,
400
+ "rewards/rejected": -0.7577352523803711,
401
+ "step": 230
402
+ },
403
+ {
404
+ "epoch": 0.25117739403453687,
405
+ "grad_norm": 9.428286192810406,
406
+ "learning_rate": 4.6612438066572555e-07,
407
+ "logits/chosen": -0.3501998782157898,
408
+ "logits/rejected": -0.2727964520454407,
409
+ "logps/chosen": -312.61175537109375,
410
+ "logps/rejected": -358.1639099121094,
411
+ "loss": 0.5635,
412
+ "rewards/accuracies": 0.75,
413
+ "rewards/chosen": -0.3030812740325928,
414
+ "rewards/margins": 0.5171294212341309,
415
+ "rewards/rejected": -0.8202105760574341,
416
+ "step": 240
417
+ },
418
+ {
419
+ "epoch": 0.2616431187859759,
420
+ "grad_norm": 11.893713587854874,
421
+ "learning_rate": 4.6138518605333664e-07,
422
+ "logits/chosen": -0.3740696310997009,
423
+ "logits/rejected": -0.3431631624698639,
424
+ "logps/chosen": -318.5814514160156,
425
+ "logps/rejected": -364.1845703125,
426
+ "loss": 0.5365,
427
+ "rewards/accuracies": 0.7875000238418579,
428
+ "rewards/chosen": -0.29904454946517944,
429
+ "rewards/margins": 0.49876856803894043,
430
+ "rewards/rejected": -0.7978130578994751,
431
+ "step": 250
432
+ },
433
+ {
434
+ "epoch": 0.272108843537415,
435
+ "grad_norm": 10.66317821656739,
436
+ "learning_rate": 4.5636328249082514e-07,
437
+ "logits/chosen": -0.5066936612129211,
438
+ "logits/rejected": -0.2627994120121002,
439
+ "logps/chosen": -326.15496826171875,
440
+ "logps/rejected": -359.80242919921875,
441
+ "loss": 0.5521,
442
+ "rewards/accuracies": 0.699999988079071,
443
+ "rewards/chosen": -0.2969462275505066,
444
+ "rewards/margins": 0.45207586884498596,
445
+ "rewards/rejected": -0.7490221261978149,
446
+ "step": 260
447
+ },
448
+ {
449
+ "epoch": 0.282574568288854,
450
+ "grad_norm": 15.074175115734294,
451
+ "learning_rate": 4.510653863290871e-07,
452
+ "logits/chosen": -0.3418571949005127,
453
+ "logits/rejected": -0.21116261184215546,
454
+ "logps/chosen": -371.8050842285156,
455
+ "logps/rejected": -365.5683288574219,
456
+ "loss": 0.5354,
457
+ "rewards/accuracies": 0.7124999761581421,
458
+ "rewards/chosen": -0.45582056045532227,
459
+ "rewards/margins": 0.5173171758651733,
460
+ "rewards/rejected": -0.9731376767158508,
461
+ "step": 270
462
+ },
463
+ {
464
+ "epoch": 0.29304029304029305,
465
+ "grad_norm": 11.47127961963256,
466
+ "learning_rate": 4.4549858303465737e-07,
467
+ "logits/chosen": -0.45343008637428284,
468
+ "logits/rejected": -0.21792340278625488,
469
+ "logps/chosen": -384.6351013183594,
470
+ "logps/rejected": -404.1702880859375,
471
+ "loss": 0.5688,
472
+ "rewards/accuracies": 0.762499988079071,
473
+ "rewards/chosen": -0.4517960548400879,
474
+ "rewards/margins": 0.690081775188446,
475
+ "rewards/rejected": -1.1418778896331787,
476
+ "step": 280
477
+ },
478
+ {
479
+ "epoch": 0.3035060177917321,
480
+ "grad_norm": 10.944507091993055,
481
+ "learning_rate": 4.396703177135261e-07,
482
+ "logits/chosen": -0.5740801692008972,
483
+ "logits/rejected": -0.3048514723777771,
484
+ "logps/chosen": -362.6668395996094,
485
+ "logps/rejected": -351.4581604003906,
486
+ "loss": 0.5486,
487
+ "rewards/accuracies": 0.75,
488
+ "rewards/chosen": -0.42820802330970764,
489
+ "rewards/margins": 0.5140999555587769,
490
+ "rewards/rejected": -0.9423079490661621,
491
+ "step": 290
492
+ },
493
+ {
494
+ "epoch": 0.3139717425431711,
495
+ "grad_norm": 15.202455626347957,
496
+ "learning_rate": 4.335883851539693e-07,
497
+ "logits/chosen": -0.24740684032440186,
498
+ "logits/rejected": -0.16567502915859222,
499
+ "logps/chosen": -326.53814697265625,
500
+ "logps/rejected": -362.52337646484375,
501
+ "loss": 0.56,
502
+ "rewards/accuracies": 0.6499999761581421,
503
+ "rewards/chosen": -0.613074779510498,
504
+ "rewards/margins": 0.3882750868797302,
505
+ "rewards/rejected": -1.0013500452041626,
506
+ "step": 300
507
+ },
508
+ {
509
+ "epoch": 0.3139717425431711,
510
+ "eval_logits/chosen": -0.27571603655815125,
511
+ "eval_logits/rejected": -0.08016987144947052,
512
+ "eval_logps/chosen": -375.7269287109375,
513
+ "eval_logps/rejected": -408.14862060546875,
514
+ "eval_loss": 0.5452203154563904,
515
+ "eval_rewards/accuracies": 0.7222222089767456,
516
+ "eval_rewards/chosen": -0.706169068813324,
517
+ "eval_rewards/margins": 0.5621480941772461,
518
+ "eval_rewards/rejected": -1.2683171033859253,
519
+ "eval_runtime": 228.6878,
520
+ "eval_samples_per_second": 8.746,
521
+ "eval_steps_per_second": 0.275,
522
+ "step": 300
523
+ },
524
+ {
525
+ "epoch": 0.32443746729461015,
526
+ "grad_norm": 13.811587080938947,
527
+ "learning_rate": 4.272609194017105e-07,
528
+ "logits/chosen": -0.4527352452278137,
529
+ "logits/rejected": -0.062251877039670944,
530
+ "logps/chosen": -355.1481628417969,
531
+ "logps/rejected": -345.7646484375,
532
+ "loss": 0.5359,
533
+ "rewards/accuracies": 0.737500011920929,
534
+ "rewards/chosen": -0.5272838473320007,
535
+ "rewards/margins": 0.645318329334259,
536
+ "rewards/rejected": -1.1726022958755493,
537
+ "step": 310
538
+ },
539
+ {
540
+ "epoch": 0.3349031920460492,
541
+ "grad_norm": 12.089739461632615,
542
+ "learning_rate": 4.2069638288135547e-07,
543
+ "logits/chosen": -0.3022197186946869,
544
+ "logits/rejected": -0.22582903504371643,
545
+ "logps/chosen": -320.36968994140625,
546
+ "logps/rejected": -406.35565185546875,
547
+ "loss": 0.5448,
548
+ "rewards/accuracies": 0.7124999761581421,
549
+ "rewards/chosen": -0.435504674911499,
550
+ "rewards/margins": 0.6115713715553284,
551
+ "rewards/rejected": -1.0470759868621826,
552
+ "step": 320
553
+ },
554
+ {
555
+ "epoch": 0.3453689167974882,
556
+ "grad_norm": 16.48349473214006,
557
+ "learning_rate": 4.139035550786494e-07,
558
+ "logits/chosen": -0.10828112065792084,
559
+ "logits/rejected": 0.048714399337768555,
560
+ "logps/chosen": -354.105712890625,
561
+ "logps/rejected": -383.7474060058594,
562
+ "loss": 0.5535,
563
+ "rewards/accuracies": 0.675000011920929,
564
+ "rewards/chosen": -0.7812685966491699,
565
+ "rewards/margins": 0.4748496413230896,
566
+ "rewards/rejected": -1.2561182975769043,
567
+ "step": 330
568
+ },
569
+ {
570
+ "epoch": 0.35583464154892724,
571
+ "grad_norm": 9.897253624611022,
572
+ "learning_rate": 4.0689152079869306e-07,
573
+ "logits/chosen": -0.14965051412582397,
574
+ "logits/rejected": 0.014736655168235302,
575
+ "logps/chosen": -307.13482666015625,
576
+ "logps/rejected": -319.986083984375,
577
+ "loss": 0.5461,
578
+ "rewards/accuracies": 0.6499999761581421,
579
+ "rewards/chosen": -0.4124748110771179,
580
+ "rewards/margins": 0.4596773087978363,
581
+ "rewards/rejected": -0.8721522092819214,
582
+ "step": 340
583
+ },
584
+ {
585
+ "epoch": 0.3663003663003663,
586
+ "grad_norm": 10.612437127371589,
587
+ "learning_rate": 3.99669658015821e-07,
588
+ "logits/chosen": -0.07019379734992981,
589
+ "logits/rejected": 0.12156897783279419,
590
+ "logps/chosen": -348.1796875,
591
+ "logps/rejected": -370.327880859375,
592
+ "loss": 0.5437,
593
+ "rewards/accuracies": 0.6000000238418579,
594
+ "rewards/chosen": -0.7881641983985901,
595
+ "rewards/margins": 0.43442434072494507,
596
+ "rewards/rejected": -1.2225885391235352,
597
+ "step": 350
598
+ },
599
+ {
600
+ "epoch": 0.37676609105180536,
601
+ "grad_norm": 14.930745956052746,
602
+ "learning_rate": 3.92247625331392e-07,
603
+ "logits/chosen": -0.120775006711483,
604
+ "logits/rejected": -0.008257830515503883,
605
+ "logps/chosen": -383.41534423828125,
606
+ "logps/rejected": -410.1376953125,
607
+ "loss": 0.5203,
608
+ "rewards/accuracies": 0.800000011920929,
609
+ "rewards/chosen": -0.8889461755752563,
610
+ "rewards/margins": 0.48123469948768616,
611
+ "rewards/rejected": -1.3701808452606201,
612
+ "step": 360
613
+ },
614
+ {
615
+ "epoch": 0.3872318158032444,
616
+ "grad_norm": 14.188357046119185,
617
+ "learning_rate": 3.846353490562664e-07,
618
+ "logits/chosen": -0.21186837553977966,
619
+ "logits/rejected": -0.18856562674045563,
620
+ "logps/chosen": -341.7081604003906,
621
+ "logps/rejected": -380.34039306640625,
622
+ "loss": 0.5258,
623
+ "rewards/accuracies": 0.6499999761581421,
624
+ "rewards/chosen": -0.6658584475517273,
625
+ "rewards/margins": 0.510163426399231,
626
+ "rewards/rejected": -1.1760218143463135,
627
+ "step": 370
628
+ },
629
+ {
630
+ "epoch": 0.3976975405546834,
631
+ "grad_norm": 17.153113991725967,
632
+ "learning_rate": 3.768430099352445e-07,
633
+ "logits/chosen": -0.13871899247169495,
634
+ "logits/rejected": -0.02639787271618843,
635
+ "logps/chosen": -330.0691833496094,
636
+ "logps/rejected": -390.55267333984375,
637
+ "loss": 0.5309,
638
+ "rewards/accuracies": 0.75,
639
+ "rewards/chosen": -0.5334495306015015,
640
+ "rewards/margins": 0.5370527505874634,
641
+ "rewards/rejected": -1.0705024003982544,
642
+ "step": 380
643
+ },
644
+ {
645
+ "epoch": 0.40816326530612246,
646
+ "grad_norm": 14.72003311606877,
647
+ "learning_rate": 3.6888102953122304e-07,
648
+ "logits/chosen": -0.16793277859687805,
649
+ "logits/rejected": -0.06103026866912842,
650
+ "logps/chosen": -300.54656982421875,
651
+ "logps/rejected": -357.5650634765625,
652
+ "loss": 0.5411,
653
+ "rewards/accuracies": 0.6875,
654
+ "rewards/chosen": -0.5054274201393127,
655
+ "rewards/margins": 0.5430657863616943,
656
+ "rewards/rejected": -1.0484931468963623,
657
+ "step": 390
658
+ },
659
+ {
660
+ "epoch": 0.4186289900575615,
661
+ "grad_norm": 20.850926855130513,
662
+ "learning_rate": 3.607600562872785e-07,
663
+ "logits/chosen": -0.2158089131116867,
664
+ "logits/rejected": -0.2511315941810608,
665
+ "logps/chosen": -333.9248046875,
666
+ "logps/rejected": -421.63818359375,
667
+ "loss": 0.5508,
668
+ "rewards/accuracies": 0.6625000238418579,
669
+ "rewards/chosen": -0.6180536150932312,
670
+ "rewards/margins": 0.4438760280609131,
671
+ "rewards/rejected": -1.0619295835494995,
672
+ "step": 400
673
+ },
674
+ {
675
+ "epoch": 0.4186289900575615,
676
+ "eval_logits/chosen": -0.29057949781417847,
677
+ "eval_logits/rejected": -0.09457352012395859,
678
+ "eval_logps/chosen": -361.9530334472656,
679
+ "eval_logps/rejected": -406.53271484375,
680
+ "eval_loss": 0.5345055460929871,
681
+ "eval_rewards/accuracies": 0.7420634627342224,
682
+ "eval_rewards/chosen": -0.5684300065040588,
683
+ "eval_rewards/margins": 0.6837278604507446,
684
+ "eval_rewards/rejected": -1.2521578073501587,
685
+ "eval_runtime": 229.6469,
686
+ "eval_samples_per_second": 8.709,
687
+ "eval_steps_per_second": 0.274,
688
+ "step": 400
689
+ },
690
+ {
691
+ "epoch": 0.4290947148090005,
692
+ "grad_norm": 10.298070998489017,
693
+ "learning_rate": 3.5249095128531856e-07,
694
+ "logits/chosen": -0.1405165195465088,
695
+ "logits/rejected": 0.06610298156738281,
696
+ "logps/chosen": -367.21270751953125,
697
+ "logps/rejected": -396.1885681152344,
698
+ "loss": 0.5457,
699
+ "rewards/accuracies": 0.762499988079071,
700
+ "rewards/chosen": -0.6680795550346375,
701
+ "rewards/margins": 0.6149377822875977,
702
+ "rewards/rejected": -1.2830173969268799,
703
+ "step": 410
704
+ },
705
+ {
706
+ "epoch": 0.43956043956043955,
707
+ "grad_norm": 11.725780840224937,
708
+ "learning_rate": 3.4408477372034736e-07,
709
+ "logits/chosen": -0.3442925810813904,
710
+ "logits/rejected": -0.0029064356349408627,
711
+ "logps/chosen": -413.73199462890625,
712
+ "logps/rejected": -388.2182312011719,
713
+ "loss": 0.53,
714
+ "rewards/accuracies": 0.675000011920929,
715
+ "rewards/chosen": -0.6328467130661011,
716
+ "rewards/margins": 0.5191168189048767,
717
+ "rewards/rejected": -1.151963472366333,
718
+ "step": 420
719
+ },
720
+ {
721
+ "epoch": 0.4500261643118786,
722
+ "grad_norm": 13.27922986778142,
723
+ "learning_rate": 3.3555276610977276e-07,
724
+ "logits/chosen": -0.06918157637119293,
725
+ "logits/rejected": 0.0864952802658081,
726
+ "logps/chosen": -294.5364074707031,
727
+ "logps/rejected": -392.9295654296875,
728
+ "loss": 0.5241,
729
+ "rewards/accuracies": 0.7749999761581421,
730
+ "rewards/chosen": -0.6482856273651123,
731
+ "rewards/margins": 0.7281777262687683,
732
+ "rewards/rejected": -1.3764632940292358,
733
+ "step": 430
734
+ },
735
+ {
736
+ "epoch": 0.4604918890633176,
737
+ "grad_norm": 12.766468960268535,
738
+ "learning_rate": 3.269063392575352e-07,
739
+ "logits/chosen": 0.02183201164007187,
740
+ "logits/rejected": 0.08124883472919464,
741
+ "logps/chosen": -320.3900146484375,
742
+ "logps/rejected": -374.9771728515625,
743
+ "loss": 0.5365,
744
+ "rewards/accuracies": 0.699999988079071,
745
+ "rewards/chosen": -0.6313291788101196,
746
+ "rewards/margins": 0.6131441593170166,
747
+ "rewards/rejected": -1.2444733381271362,
748
+ "step": 440
749
+ },
750
+ {
751
+ "epoch": 0.47095761381475665,
752
+ "grad_norm": 11.245463417386828,
753
+ "learning_rate": 3.1815705699316964e-07,
754
+ "logits/chosen": -0.10290797799825668,
755
+ "logits/rejected": 0.08914367109537125,
756
+ "logps/chosen": -369.95697021484375,
757
+ "logps/rejected": -416.80853271484375,
758
+ "loss": 0.5481,
759
+ "rewards/accuracies": 0.75,
760
+ "rewards/chosen": -0.7395186424255371,
761
+ "rewards/margins": 0.693315863609314,
762
+ "rewards/rejected": -1.432834506034851,
763
+ "step": 450
764
+ },
765
+ {
766
+ "epoch": 0.48142333856619574,
767
+ "grad_norm": 12.061083908845202,
768
+ "learning_rate": 3.0931662070620794e-07,
769
+ "logits/chosen": -0.15375325083732605,
770
+ "logits/rejected": 0.12264730781316757,
771
+ "logps/chosen": -304.6129455566406,
772
+ "logps/rejected": -363.7560729980469,
773
+ "loss": 0.5406,
774
+ "rewards/accuracies": 0.8125,
775
+ "rewards/chosen": -0.6241869926452637,
776
+ "rewards/margins": 0.7929509878158569,
777
+ "rewards/rejected": -1.4171379804611206,
778
+ "step": 460
779
+ },
780
+ {
781
+ "epoch": 0.49188906331763477,
782
+ "grad_norm": 14.419339908169533,
783
+ "learning_rate": 3.003968536966078e-07,
784
+ "logits/chosen": -0.03872384503483772,
785
+ "logits/rejected": 0.09880894422531128,
786
+ "logps/chosen": -359.7276916503906,
787
+ "logps/rejected": -385.1877746582031,
788
+ "loss": 0.5015,
789
+ "rewards/accuracies": 0.7124999761581421,
790
+ "rewards/chosen": -0.6624265909194946,
791
+ "rewards/margins": 0.4815330505371094,
792
+ "rewards/rejected": -1.1439597606658936,
793
+ "step": 470
794
+ },
795
+ {
796
+ "epoch": 0.5023547880690737,
797
+ "grad_norm": 17.404394779820596,
798
+ "learning_rate": 2.9140968536213693e-07,
799
+ "logits/chosen": -0.11226484924554825,
800
+ "logits/rejected": 0.18643446266651154,
801
+ "logps/chosen": -360.540771484375,
802
+ "logps/rejected": -442.9476013183594,
803
+ "loss": 0.5182,
804
+ "rewards/accuracies": 0.8125,
805
+ "rewards/chosen": -0.6307897567749023,
806
+ "rewards/margins": 0.9138120412826538,
807
+ "rewards/rejected": -1.5446019172668457,
808
+ "step": 480
809
+ },
810
+ {
811
+ "epoch": 0.5128205128205128,
812
+ "grad_norm": 12.356577288104344,
813
+ "learning_rate": 2.823671352438608e-07,
814
+ "logits/chosen": -0.19844231009483337,
815
+ "logits/rejected": 0.178545281291008,
816
+ "logps/chosen": -383.7408752441406,
817
+ "logps/rejected": -408.270263671875,
818
+ "loss": 0.5192,
819
+ "rewards/accuracies": 0.8374999761581421,
820
+ "rewards/chosen": -0.5568854212760925,
821
+ "rewards/margins": 0.7981684803962708,
822
+ "rewards/rejected": -1.3550540208816528,
823
+ "step": 490
824
+ },
825
+ {
826
+ "epoch": 0.5232862375719518,
827
+ "grad_norm": 11.064578503088146,
828
+ "learning_rate": 2.73281296951072e-07,
829
+ "logits/chosen": -0.09013144671916962,
830
+ "logits/rejected": 0.18840348720550537,
831
+ "logps/chosen": -322.2913513183594,
832
+ "logps/rejected": -380.41986083984375,
833
+ "loss": 0.5182,
834
+ "rewards/accuracies": 0.7250000238418579,
835
+ "rewards/chosen": -0.6678560972213745,
836
+ "rewards/margins": 0.7828424572944641,
837
+ "rewards/rejected": -1.4506986141204834,
838
+ "step": 500
839
+ },
840
+ {
841
+ "epoch": 0.5232862375719518,
842
+ "eval_logits/chosen": 0.010395529679954052,
843
+ "eval_logits/rejected": 0.26224514842033386,
844
+ "eval_logps/chosen": -401.8782653808594,
845
+ "eval_logps/rejected": -446.0949401855469,
846
+ "eval_loss": 0.5285578370094299,
847
+ "eval_rewards/accuracies": 0.726190447807312,
848
+ "eval_rewards/chosen": -0.967681884765625,
849
+ "eval_rewards/margins": 0.6800985932350159,
850
+ "eval_rewards/rejected": -1.6477805376052856,
851
+ "eval_runtime": 228.783,
852
+ "eval_samples_per_second": 8.742,
853
+ "eval_steps_per_second": 0.275,
854
+ "step": 500
855
+ },
856
+ {
857
+ "epoch": 0.533751962323391,
858
+ "grad_norm": 15.367822240197881,
859
+ "learning_rate": 2.641643219871597e-07,
860
+ "logits/chosen": 0.03556225821375847,
861
+ "logits/rejected": 0.202899768948555,
862
+ "logps/chosen": -343.4957580566406,
863
+ "logps/rejected": -412.44451904296875,
864
+ "loss": 0.5094,
865
+ "rewards/accuracies": 0.762499988079071,
866
+ "rewards/chosen": -0.7577770948410034,
867
+ "rewards/margins": 0.8080625534057617,
868
+ "rewards/rejected": -1.5658397674560547,
869
+ "step": 510
870
+ },
871
+ {
872
+ "epoch": 0.54421768707483,
873
+ "grad_norm": 13.43430915472883,
874
+ "learning_rate": 2.550284034980507e-07,
875
+ "logits/chosen": 0.07642324268817902,
876
+ "logits/rejected": 0.16387151181697845,
877
+ "logps/chosen": -363.08551025390625,
878
+ "logps/rejected": -401.5746154785156,
879
+ "loss": 0.5326,
880
+ "rewards/accuracies": 0.675000011920929,
881
+ "rewards/chosen": -0.887750506401062,
882
+ "rewards/margins": 0.5795798301696777,
883
+ "rewards/rejected": -1.4673302173614502,
884
+ "step": 520
885
+ },
886
+ {
887
+ "epoch": 0.554683411826269,
888
+ "grad_norm": 16.90441101569984,
889
+ "learning_rate": 2.4588575996495794e-07,
890
+ "logits/chosen": -0.1090664491057396,
891
+ "logits/rejected": 0.1268138587474823,
892
+ "logps/chosen": -387.29510498046875,
893
+ "logps/rejected": -459.40264892578125,
894
+ "loss": 0.5107,
895
+ "rewards/accuracies": 0.8125,
896
+ "rewards/chosen": -0.6459606885910034,
897
+ "rewards/margins": 0.941811203956604,
898
+ "rewards/rejected": -1.5877716541290283,
899
+ "step": 530
900
+ },
901
+ {
902
+ "epoch": 0.565149136577708,
903
+ "grad_norm": 12.507368084637326,
904
+ "learning_rate": 2.367486188632446e-07,
905
+ "logits/chosen": -0.039569877088069916,
906
+ "logits/rejected": 0.09773418307304382,
907
+ "logps/chosen": -366.95806884765625,
908
+ "logps/rejected": -394.683349609375,
909
+ "loss": 0.5247,
910
+ "rewards/accuracies": 0.7250000238418579,
911
+ "rewards/chosen": -0.6594400405883789,
912
+ "rewards/margins": 0.6299028992652893,
913
+ "rewards/rejected": -1.289342999458313,
914
+ "step": 540
915
+ },
916
+ {
917
+ "epoch": 0.5756148613291471,
918
+ "grad_norm": 13.366833820050662,
919
+ "learning_rate": 2.276292003092593e-07,
920
+ "logits/chosen": -0.1256514936685562,
921
+ "logits/rejected": 0.14441490173339844,
922
+ "logps/chosen": -369.62310791015625,
923
+ "logps/rejected": -368.75726318359375,
924
+ "loss": 0.5278,
925
+ "rewards/accuracies": 0.699999988079071,
926
+ "rewards/chosen": -0.6649243235588074,
927
+ "rewards/margins": 0.5402246117591858,
928
+ "rewards/rejected": -1.2051489353179932,
929
+ "step": 550
930
+ },
931
+ {
932
+ "epoch": 0.5860805860805861,
933
+ "grad_norm": 15.734482090071854,
934
+ "learning_rate": 2.185397007170141e-07,
935
+ "logits/chosen": 0.1283525973558426,
936
+ "logits/rejected": 0.24960267543792725,
937
+ "logps/chosen": -285.6956787109375,
938
+ "logps/rejected": -360.90509033203125,
939
+ "loss": 0.532,
940
+ "rewards/accuracies": 0.737500011920929,
941
+ "rewards/chosen": -0.4988901615142822,
942
+ "rewards/margins": 0.7280929684638977,
943
+ "rewards/rejected": -1.2269830703735352,
944
+ "step": 560
945
+ },
946
+ {
947
+ "epoch": 0.5965463108320251,
948
+ "grad_norm": 12.793432900556661,
949
+ "learning_rate": 2.094922764865619e-07,
950
+ "logits/chosen": 0.18585723638534546,
951
+ "logits/rejected": 0.2675357460975647,
952
+ "logps/chosen": -364.28521728515625,
953
+ "logps/rejected": -457.3734436035156,
954
+ "loss": 0.5118,
955
+ "rewards/accuracies": 0.7124999761581421,
956
+ "rewards/chosen": -1.0480437278747559,
957
+ "rewards/margins": 0.5522451996803284,
958
+ "rewards/rejected": -1.600288987159729,
959
+ "step": 570
960
+ },
961
+ {
962
+ "epoch": 0.6070120355834642,
963
+ "grad_norm": 14.54170281598928,
964
+ "learning_rate": 2.0049902774588797e-07,
965
+ "logits/chosen": 0.08888915926218033,
966
+ "logits/rejected": 0.3060843050479889,
967
+ "logps/chosen": -333.07373046875,
968
+ "logps/rejected": -380.82818603515625,
969
+ "loss": 0.5291,
970
+ "rewards/accuracies": 0.6875,
971
+ "rewards/chosen": -0.7626341581344604,
972
+ "rewards/margins": 0.7598274946212769,
973
+ "rewards/rejected": -1.5224617719650269,
974
+ "step": 580
975
+ },
976
+ {
977
+ "epoch": 0.6174777603349032,
978
+ "grad_norm": 14.79212886128343,
979
+ "learning_rate": 1.9157198216806238e-07,
980
+ "logits/chosen": -0.1875981092453003,
981
+ "logits/rejected": 0.05693904310464859,
982
+ "logps/chosen": -382.46868896484375,
983
+ "logps/rejected": -439.0375061035156,
984
+ "loss": 0.4968,
985
+ "rewards/accuracies": 0.7124999761581421,
986
+ "rewards/chosen": -0.6531257629394531,
987
+ "rewards/margins": 0.8373476266860962,
988
+ "rewards/rejected": -1.4904735088348389,
989
+ "step": 590
990
+ },
991
+ {
992
+ "epoch": 0.6279434850863422,
993
+ "grad_norm": 13.087506840381133,
994
+ "learning_rate": 1.8272307888529274e-07,
995
+ "logits/chosen": 0.026107722893357277,
996
+ "logits/rejected": 0.29632943868637085,
997
+ "logps/chosen": -438.74761962890625,
998
+ "logps/rejected": -426.4642639160156,
999
+ "loss": 0.4936,
1000
+ "rewards/accuracies": 0.675000011920929,
1001
+ "rewards/chosen": -0.7559309601783752,
1002
+ "rewards/margins": 0.568900465965271,
1003
+ "rewards/rejected": -1.324831247329712,
1004
+ "step": 600
1005
+ },
1006
+ {
1007
+ "epoch": 0.6279434850863422,
1008
+ "eval_logits/chosen": -0.10911048203706741,
1009
+ "eval_logits/rejected": 0.15195031464099884,
1010
+ "eval_logps/chosen": -361.6075439453125,
1011
+ "eval_logps/rejected": -419.2403259277344,
1012
+ "eval_loss": 0.5254898071289062,
1013
+ "eval_rewards/accuracies": 0.7777777910232544,
1014
+ "eval_rewards/chosen": -0.5649750232696533,
1015
+ "eval_rewards/margins": 0.8142591118812561,
1016
+ "eval_rewards/rejected": -1.3792341947555542,
1017
+ "eval_runtime": 228.5101,
1018
+ "eval_samples_per_second": 8.752,
1019
+ "eval_steps_per_second": 0.276,
1020
+ "step": 600
1021
+ },
1022
+ {
1023
+ "epoch": 0.6384092098377813,
1024
+ "grad_norm": 16.705575529622894,
1025
+ "learning_rate": 1.7396415252139288e-07,
1026
+ "logits/chosen": -0.07063977420330048,
1027
+ "logits/rejected": 0.1474047303199768,
1028
+ "logps/chosen": -378.7034912109375,
1029
+ "logps/rejected": -424.65838623046875,
1030
+ "loss": 0.5334,
1031
+ "rewards/accuracies": 0.7749999761581421,
1032
+ "rewards/chosen": -0.5486348271369934,
1033
+ "rewards/margins": 0.8419520258903503,
1034
+ "rewards/rejected": -1.3905869722366333,
1035
+ "step": 610
1036
+ },
1037
+ {
1038
+ "epoch": 0.6488749345892203,
1039
+ "grad_norm": 16.680744128054243,
1040
+ "learning_rate": 1.6530691736402316e-07,
1041
+ "logits/chosen": -0.06789754331111908,
1042
+ "logits/rejected": 0.1724042445421219,
1043
+ "logps/chosen": -380.02203369140625,
1044
+ "logps/rejected": -394.5411071777344,
1045
+ "loss": 0.4879,
1046
+ "rewards/accuracies": 0.7749999761581421,
1047
+ "rewards/chosen": -0.6499656438827515,
1048
+ "rewards/margins": 0.8682801127433777,
1049
+ "rewards/rejected": -1.5182459354400635,
1050
+ "step": 620
1051
+ },
1052
+ {
1053
+ "epoch": 0.6593406593406593,
1054
+ "grad_norm": 12.347293383994431,
1055
+ "learning_rate": 1.5676295169786864e-07,
1056
+ "logits/chosen": -0.008408618159592152,
1057
+ "logits/rejected": 0.24951598048210144,
1058
+ "logps/chosen": -339.1716003417969,
1059
+ "logps/rejected": -378.724609375,
1060
+ "loss": 0.4863,
1061
+ "rewards/accuracies": 0.7875000238418579,
1062
+ "rewards/chosen": -0.5663776397705078,
1063
+ "rewards/margins": 0.7724849581718445,
1064
+ "rewards/rejected": -1.338862657546997,
1065
+ "step": 630
1066
+ },
1067
+ {
1068
+ "epoch": 0.6698063840920984,
1069
+ "grad_norm": 16.072115808650597,
1070
+ "learning_rate": 1.483436823197092e-07,
1071
+ "logits/chosen": 0.09198600053787231,
1072
+ "logits/rejected": 0.36397668719291687,
1073
+ "logps/chosen": -319.0302734375,
1074
+ "logps/rejected": -378.3144836425781,
1075
+ "loss": 0.508,
1076
+ "rewards/accuracies": 0.762499988079071,
1077
+ "rewards/chosen": -0.670558750629425,
1078
+ "rewards/margins": 0.8327127695083618,
1079
+ "rewards/rejected": -1.5032716989517212,
1080
+ "step": 640
1081
+ },
1082
+ {
1083
+ "epoch": 0.6802721088435374,
1084
+ "grad_norm": 13.081700015736937,
1085
+ "learning_rate": 1.4006036925609243e-07,
1086
+ "logits/chosen": 0.07456495612859726,
1087
+ "logits/rejected": 0.13473407924175262,
1088
+ "logps/chosen": -359.57244873046875,
1089
+ "logps/rejected": -442.2086486816406,
1090
+ "loss": 0.5214,
1091
+ "rewards/accuracies": 0.737500011920929,
1092
+ "rewards/chosen": -0.6933525800704956,
1093
+ "rewards/margins": 0.744998574256897,
1094
+ "rewards/rejected": -1.4383512735366821,
1095
+ "step": 650
1096
+ },
1097
+ {
1098
+ "epoch": 0.6907378335949764,
1099
+ "grad_norm": 12.02667717260737,
1100
+ "learning_rate": 1.319240907040458e-07,
1101
+ "logits/chosen": -0.20533649623394012,
1102
+ "logits/rejected": -0.07553025335073471,
1103
+ "logps/chosen": -349.34393310546875,
1104
+ "logps/rejected": -402.1936950683594,
1105
+ "loss": 0.5014,
1106
+ "rewards/accuracies": 0.8500000238418579,
1107
+ "rewards/chosen": -0.6376605033874512,
1108
+ "rewards/margins": 0.7424057126045227,
1109
+ "rewards/rejected": -1.3800660371780396,
1110
+ "step": 660
1111
+ },
1112
+ {
1113
+ "epoch": 0.7012035583464155,
1114
+ "grad_norm": 15.027313729601833,
1115
+ "learning_rate": 1.239457282149695e-07,
1116
+ "logits/chosen": -0.05018684267997742,
1117
+ "logits/rejected": 0.18970593810081482,
1118
+ "logps/chosen": -376.6749267578125,
1119
+ "logps/rejected": -431.7818298339844,
1120
+ "loss": 0.504,
1121
+ "rewards/accuracies": 0.699999988079071,
1122
+ "rewards/chosen": -0.8920845985412598,
1123
+ "rewards/margins": 0.7974964380264282,
1124
+ "rewards/rejected": -1.6895811557769775,
1125
+ "step": 670
1126
+ },
1127
+ {
1128
+ "epoch": 0.7116692830978545,
1129
+ "grad_norm": 14.878904971459766,
1130
+ "learning_rate": 1.1613595214152711e-07,
1131
+ "logits/chosen": 0.09591469913721085,
1132
+ "logits/rejected": 0.25994688272476196,
1133
+ "logps/chosen": -417.44451904296875,
1134
+ "logps/rejected": -451.24029541015625,
1135
+ "loss": 0.532,
1136
+ "rewards/accuracies": 0.675000011920929,
1137
+ "rewards/chosen": -0.8660572171211243,
1138
+ "rewards/margins": 0.6547313928604126,
1139
+ "rewards/rejected": -1.520788550376892,
1140
+ "step": 680
1141
+ },
1142
+ {
1143
+ "epoch": 0.7221350078492935,
1144
+ "grad_norm": 14.634724746319717,
1145
+ "learning_rate": 1.0850520736699362e-07,
1146
+ "logits/chosen": -0.1941656470298767,
1147
+ "logits/rejected": 0.12699861824512482,
1148
+ "logps/chosen": -326.013671875,
1149
+ "logps/rejected": -370.91943359375,
1150
+ "loss": 0.4932,
1151
+ "rewards/accuracies": 0.7749999761581421,
1152
+ "rewards/chosen": -0.4801979064941406,
1153
+ "rewards/margins": 0.7524018883705139,
1154
+ "rewards/rejected": -1.2325998544692993,
1155
+ "step": 690
1156
+ },
1157
+ {
1158
+ "epoch": 0.7326007326007326,
1159
+ "grad_norm": 14.113815152461443,
1160
+ "learning_rate": 1.0106369933615042e-07,
1161
+ "logits/chosen": 0.024108683690428734,
1162
+ "logits/rejected": 0.23957733809947968,
1163
+ "logps/chosen": -328.45379638671875,
1164
+ "logps/rejected": -388.8372802734375,
1165
+ "loss": 0.4967,
1166
+ "rewards/accuracies": 0.8125,
1167
+ "rewards/chosen": -0.5869916677474976,
1168
+ "rewards/margins": 0.8734583854675293,
1169
+ "rewards/rejected": -1.4604499340057373,
1170
+ "step": 700
1171
+ },
1172
+ {
1173
+ "epoch": 0.7326007326007326,
1174
+ "eval_logits/chosen": -0.07450958341360092,
1175
+ "eval_logits/rejected": 0.1898072510957718,
1176
+ "eval_logps/chosen": -361.9271240234375,
1177
+ "eval_logps/rejected": -419.3396911621094,
1178
+ "eval_loss": 0.5152686834335327,
1179
+ "eval_rewards/accuracies": 0.7698412537574768,
1180
+ "eval_rewards/chosen": -0.5681709051132202,
1181
+ "eval_rewards/margins": 0.8120573163032532,
1182
+ "eval_rewards/rejected": -1.3802281618118286,
1183
+ "eval_runtime": 227.5622,
1184
+ "eval_samples_per_second": 8.789,
1185
+ "eval_steps_per_second": 0.277,
1186
+ "step": 700
1187
+ },
1188
+ {
1189
+ "epoch": 0.7430664573521716,
1190
+ "grad_norm": 14.271632027776764,
1191
+ "learning_rate": 9.382138040640714e-08,
1192
+ "logits/chosen": 0.04855802655220032,
1193
+ "logits/rejected": 0.28160515427589417,
1194
+ "logps/chosen": -356.8811340332031,
1195
+ "logps/rejected": -407.11016845703125,
1196
+ "loss": 0.54,
1197
+ "rewards/accuracies": 0.75,
1198
+ "rewards/chosen": -0.633429229259491,
1199
+ "rewards/margins": 0.8234784007072449,
1200
+ "rewards/rejected": -1.4569077491760254,
1201
+ "step": 710
1202
+ },
1203
+ {
1204
+ "epoch": 0.7535321821036107,
1205
+ "grad_norm": 14.7255604500235,
1206
+ "learning_rate": 8.678793653740632e-08,
1207
+ "logits/chosen": -0.11811058223247528,
1208
+ "logits/rejected": 0.15506412088871002,
1209
+ "logps/chosen": -412.05828857421875,
1210
+ "logps/rejected": -441.4915466308594,
1211
+ "loss": 0.5102,
1212
+ "rewards/accuracies": 0.7749999761581421,
1213
+ "rewards/chosen": -0.5466042757034302,
1214
+ "rewards/margins": 0.7746764421463013,
1215
+ "rewards/rejected": -1.321280837059021,
1216
+ "step": 720
1217
+ },
1218
+ {
1219
+ "epoch": 0.7639979068550498,
1220
+ "grad_norm": 12.966643699102985,
1221
+ "learning_rate": 7.997277433690983e-08,
1222
+ "logits/chosen": 0.04536755010485649,
1223
+ "logits/rejected": 0.23015692830085754,
1224
+ "logps/chosen": -310.0247802734375,
1225
+ "logps/rejected": -372.56414794921875,
1226
+ "loss": 0.5061,
1227
+ "rewards/accuracies": 0.737500011920929,
1228
+ "rewards/chosen": -0.5736085772514343,
1229
+ "rewards/margins": 0.6976566314697266,
1230
+ "rewards/rejected": -1.2712653875350952,
1231
+ "step": 730
1232
+ },
1233
+ {
1234
+ "epoch": 0.7744636316064888,
1235
+ "grad_norm": 15.46077305724092,
1236
+ "learning_rate": 7.338500848029602e-08,
1237
+ "logits/chosen": -0.012909619137644768,
1238
+ "logits/rejected": 0.290834903717041,
1239
+ "logps/chosen": -359.9085693359375,
1240
+ "logps/rejected": -388.9847412109375,
1241
+ "loss": 0.5014,
1242
+ "rewards/accuracies": 0.800000011920929,
1243
+ "rewards/chosen": -0.6467846632003784,
1244
+ "rewards/margins": 0.8261871337890625,
1245
+ "rewards/rejected": -1.4729719161987305,
1246
+ "step": 740
1247
+ },
1248
+ {
1249
+ "epoch": 0.7849293563579278,
1250
+ "grad_norm": 12.699158494833881,
1251
+ "learning_rate": 6.70334495204884e-08,
1252
+ "logits/chosen": 0.2477736920118332,
1253
+ "logits/rejected": 0.3875717520713806,
1254
+ "logps/chosen": -341.1034240722656,
1255
+ "logps/rejected": -454.39508056640625,
1256
+ "loss": 0.4955,
1257
+ "rewards/accuracies": 0.75,
1258
+ "rewards/chosen": -0.8063145875930786,
1259
+ "rewards/margins": 1.0078232288360596,
1260
+ "rewards/rejected": -1.8141378164291382,
1261
+ "step": 750
1262
+ },
1263
+ {
1264
+ "epoch": 0.7953950811093669,
1265
+ "grad_norm": 14.871002690544678,
1266
+ "learning_rate": 6.092659210462231e-08,
1267
+ "logits/chosen": 0.16597142815589905,
1268
+ "logits/rejected": 0.29944753646850586,
1269
+ "logps/chosen": -337.59442138671875,
1270
+ "logps/rejected": -443.64886474609375,
1271
+ "loss": 0.527,
1272
+ "rewards/accuracies": 0.7250000238418579,
1273
+ "rewards/chosen": -0.8204069137573242,
1274
+ "rewards/margins": 0.8414122462272644,
1275
+ "rewards/rejected": -1.6618191003799438,
1276
+ "step": 760
1277
+ },
1278
+ {
1279
+ "epoch": 0.8058608058608059,
1280
+ "grad_norm": 10.845535445418564,
1281
+ "learning_rate": 5.507260361320737e-08,
1282
+ "logits/chosen": -0.017479026690125465,
1283
+ "logits/rejected": 0.06391821056604385,
1284
+ "logps/chosen": -433.59112548828125,
1285
+ "logps/rejected": -509.65447998046875,
1286
+ "loss": 0.4918,
1287
+ "rewards/accuracies": 0.675000011920929,
1288
+ "rewards/chosen": -0.8355442881584167,
1289
+ "rewards/margins": 0.6438878774642944,
1290
+ "rewards/rejected": -1.479432225227356,
1291
+ "step": 770
1292
+ },
1293
+ {
1294
+ "epoch": 0.8163265306122449,
1295
+ "grad_norm": 11.773034698588397,
1296
+ "learning_rate": 4.947931323697982e-08,
1297
+ "logits/chosen": -0.04249686002731323,
1298
+ "logits/rejected": 0.2673026919364929,
1299
+ "logps/chosen": -412.539306640625,
1300
+ "logps/rejected": -427.43341064453125,
1301
+ "loss": 0.5184,
1302
+ "rewards/accuracies": 0.7749999761581421,
1303
+ "rewards/chosen": -0.6462075114250183,
1304
+ "rewards/margins": 0.8836655616760254,
1305
+ "rewards/rejected": -1.5298731327056885,
1306
+ "step": 780
1307
+ },
1308
+ {
1309
+ "epoch": 0.826792255363684,
1310
+ "grad_norm": 11.606763284832994,
1311
+ "learning_rate": 4.415420150605398e-08,
1312
+ "logits/chosen": 0.10231854766607285,
1313
+ "logits/rejected": 0.40341711044311523,
1314
+ "logps/chosen": -354.8089904785156,
1315
+ "logps/rejected": -397.8169250488281,
1316
+ "loss": 0.5183,
1317
+ "rewards/accuracies": 0.762499988079071,
1318
+ "rewards/chosen": -0.7357110977172852,
1319
+ "rewards/margins": 0.7533496022224426,
1320
+ "rewards/rejected": -1.489060640335083,
1321
+ "step": 790
1322
+ },
1323
+ {
1324
+ "epoch": 0.837257980115123,
1325
+ "grad_norm": 12.816494511651417,
1326
+ "learning_rate": 3.9104390285376374e-08,
1327
+ "logits/chosen": 0.12218976020812988,
1328
+ "logits/rejected": 0.38090264797210693,
1329
+ "logps/chosen": -338.58837890625,
1330
+ "logps/rejected": -401.3483581542969,
1331
+ "loss": 0.5013,
1332
+ "rewards/accuracies": 0.7250000238418579,
1333
+ "rewards/chosen": -0.7186459302902222,
1334
+ "rewards/margins": 0.7460201978683472,
1335
+ "rewards/rejected": -1.4646661281585693,
1336
+ "step": 800
1337
+ },
1338
+ {
1339
+ "epoch": 0.837257980115123,
1340
+ "eval_logits/chosen": -0.030678851529955864,
1341
+ "eval_logits/rejected": 0.2384355366230011,
1342
+ "eval_logps/chosen": -372.57794189453125,
1343
+ "eval_logps/rejected": -429.2723388671875,
1344
+ "eval_loss": 0.51365065574646,
1345
+ "eval_rewards/accuracies": 0.7658730149269104,
1346
+ "eval_rewards/chosen": -0.6746787428855896,
1347
+ "eval_rewards/margins": 0.8048755526542664,
1348
+ "eval_rewards/rejected": -1.479554295539856,
1349
+ "eval_runtime": 228.9904,
1350
+ "eval_samples_per_second": 8.734,
1351
+ "eval_steps_per_second": 0.275,
1352
+ "step": 800
1353
+ },
1354
+ {
1355
+ "epoch": 0.847723704866562,
1356
+ "grad_norm": 11.081154469888522,
1357
+ "learning_rate": 3.433663324986208e-08,
1358
+ "logits/chosen": -0.09540579468011856,
1359
+ "logits/rejected": 0.3088434338569641,
1360
+ "logps/chosen": -419.98004150390625,
1361
+ "logps/rejected": -432.2977600097656,
1362
+ "loss": 0.5109,
1363
+ "rewards/accuracies": 0.762499988079071,
1364
+ "rewards/chosen": -0.7216218709945679,
1365
+ "rewards/margins": 0.8065996170043945,
1366
+ "rewards/rejected": -1.5282217264175415,
1367
+ "step": 810
1368
+ },
1369
+ {
1370
+ "epoch": 0.858189429618001,
1371
+ "grad_norm": 13.289574828613516,
1372
+ "learning_rate": 2.9857306851953897e-08,
1373
+ "logits/chosen": -0.0814175009727478,
1374
+ "logits/rejected": 0.054249562323093414,
1375
+ "logps/chosen": -355.91827392578125,
1376
+ "logps/rejected": -427.5276794433594,
1377
+ "loss": 0.5071,
1378
+ "rewards/accuracies": 0.824999988079071,
1379
+ "rewards/chosen": -0.6057306528091431,
1380
+ "rewards/margins": 0.9160734415054321,
1381
+ "rewards/rejected": -1.521803855895996,
1382
+ "step": 820
1383
+ },
1384
+ {
1385
+ "epoch": 0.8686551543694401,
1386
+ "grad_norm": 13.234567409276869,
1387
+ "learning_rate": 2.567240179368185e-08,
1388
+ "logits/chosen": 0.14148227870464325,
1389
+ "logits/rejected": 0.24102012813091278,
1390
+ "logps/chosen": -332.89105224609375,
1391
+ "logps/rejected": -437.22869873046875,
1392
+ "loss": 0.5167,
1393
+ "rewards/accuracies": 0.7124999761581421,
1394
+ "rewards/chosen": -0.7850676774978638,
1395
+ "rewards/margins": 0.8141494989395142,
1396
+ "rewards/rejected": -1.599217176437378,
1397
+ "step": 830
1398
+ },
1399
+ {
1400
+ "epoch": 0.8791208791208791,
1401
+ "grad_norm": 12.940970521328797,
1402
+ "learning_rate": 2.1787515014630357e-08,
1403
+ "logits/chosen": 0.15607428550720215,
1404
+ "logits/rejected": 0.3544492721557617,
1405
+ "logps/chosen": -375.6202087402344,
1406
+ "logps/rejected": -448.9365234375,
1407
+ "loss": 0.4882,
1408
+ "rewards/accuracies": 0.7875000238418579,
1409
+ "rewards/chosen": -0.8064106702804565,
1410
+ "rewards/margins": 0.7966355681419373,
1411
+ "rewards/rejected": -1.603046178817749,
1412
+ "step": 840
1413
+ },
1414
+ {
1415
+ "epoch": 0.8895866038723181,
1416
+ "grad_norm": 10.926513179024628,
1417
+ "learning_rate": 1.820784220652766e-08,
1418
+ "logits/chosen": 0.1379309892654419,
1419
+ "logits/rejected": 0.2978866696357727,
1420
+ "logps/chosen": -351.2433166503906,
1421
+ "logps/rejected": -427.3173828125,
1422
+ "loss": 0.4724,
1423
+ "rewards/accuracies": 0.8125,
1424
+ "rewards/chosen": -0.6311872005462646,
1425
+ "rewards/margins": 0.944290280342102,
1426
+ "rewards/rejected": -1.5754774808883667,
1427
+ "step": 850
1428
+ },
1429
+ {
1430
+ "epoch": 0.9000523286237572,
1431
+ "grad_norm": 12.635492079106957,
1432
+ "learning_rate": 1.4938170864468636e-08,
1433
+ "logits/chosen": 0.00023215674445964396,
1434
+ "logits/rejected": 0.2956046462059021,
1435
+ "logps/chosen": -393.70452880859375,
1436
+ "logps/rejected": -441.1329040527344,
1437
+ "loss": 0.4956,
1438
+ "rewards/accuracies": 0.7749999761581421,
1439
+ "rewards/chosen": -0.7465956807136536,
1440
+ "rewards/margins": 0.7083271741867065,
1441
+ "rewards/rejected": -1.4549229145050049,
1442
+ "step": 860
1443
+ },
1444
+ {
1445
+ "epoch": 0.9105180533751962,
1446
+ "grad_norm": 12.131310754312283,
1447
+ "learning_rate": 1.1982873884064465e-08,
1448
+ "logits/chosen": -0.05417024344205856,
1449
+ "logits/rejected": 0.28210294246673584,
1450
+ "logps/chosen": -321.20526123046875,
1451
+ "logps/rejected": -356.74053955078125,
1452
+ "loss": 0.5037,
1453
+ "rewards/accuracies": 0.762499988079071,
1454
+ "rewards/chosen": -0.6472475528717041,
1455
+ "rewards/margins": 0.7646856904029846,
1456
+ "rewards/rejected": -1.411933183670044,
1457
+ "step": 870
1458
+ },
1459
+ {
1460
+ "epoch": 0.9209837781266352,
1461
+ "grad_norm": 14.916064360686926,
1462
+ "learning_rate": 9.345903713082304e-09,
1463
+ "logits/chosen": -0.02495613694190979,
1464
+ "logits/rejected": 0.08404093235731125,
1465
+ "logps/chosen": -360.6625061035156,
1466
+ "logps/rejected": -386.7431640625,
1467
+ "loss": 0.5094,
1468
+ "rewards/accuracies": 0.675000011920929,
1469
+ "rewards/chosen": -0.6419483423233032,
1470
+ "rewards/margins": 0.6380220651626587,
1471
+ "rewards/rejected": -1.279970407485962,
1472
+ "step": 880
1473
+ },
1474
+ {
1475
+ "epoch": 0.9314495028780743,
1476
+ "grad_norm": 11.521916154155225,
1477
+ "learning_rate": 7.030787065396865e-09,
1478
+ "logits/chosen": 0.1541689932346344,
1479
+ "logits/rejected": 0.43220075964927673,
1480
+ "logps/chosen": -333.72039794921875,
1481
+ "logps/rejected": -392.5670166015625,
1482
+ "loss": 0.5092,
1483
+ "rewards/accuracies": 0.737500011920929,
1484
+ "rewards/chosen": -0.6941490173339844,
1485
+ "rewards/margins": 0.8684225082397461,
1486
+ "rewards/rejected": -1.5625712871551514,
1487
+ "step": 890
1488
+ },
1489
+ {
1490
+ "epoch": 0.9419152276295133,
1491
+ "grad_norm": 14.786097584627694,
1492
+ "learning_rate": 5.04062020432286e-09,
1493
+ "logits/chosen": 0.10343728214502335,
1494
+ "logits/rejected": 0.26344841718673706,
1495
+ "logps/chosen": -381.5856018066406,
1496
+ "logps/rejected": -468.1319274902344,
1497
+ "loss": 0.4983,
1498
+ "rewards/accuracies": 0.7124999761581421,
1499
+ "rewards/chosen": -0.8143836855888367,
1500
+ "rewards/margins": 0.7843345999717712,
1501
+ "rewards/rejected": -1.5987184047698975,
1502
+ "step": 900
1503
+ },
1504
+ {
1505
+ "epoch": 0.9419152276295133,
1506
+ "eval_logits/chosen": -0.013600568287074566,
1507
+ "eval_logits/rejected": 0.2624098062515259,
1508
+ "eval_logps/chosen": -374.1649475097656,
1509
+ "eval_logps/rejected": -432.5990905761719,
1510
+ "eval_loss": 0.5130622386932373,
1511
+ "eval_rewards/accuracies": 0.7658730149269104,
1512
+ "eval_rewards/chosen": -0.6905492544174194,
1513
+ "eval_rewards/margins": 0.8222722411155701,
1514
+ "eval_rewards/rejected": -1.5128216743469238,
1515
+ "eval_runtime": 231.4541,
1516
+ "eval_samples_per_second": 8.641,
1517
+ "eval_steps_per_second": 0.272,
1518
+ "step": 900
1519
+ },
1520
+ {
1521
+ "epoch": 0.9523809523809523,
1522
+ "grad_norm": 17.98388390329237,
1523
+ "learning_rate": 3.3780648016376866e-09,
1524
+ "logits/chosen": 0.04564341902732849,
1525
+ "logits/rejected": 0.3436654210090637,
1526
+ "logps/chosen": -323.76763916015625,
1527
+ "logps/rejected": -363.12225341796875,
1528
+ "loss": 0.5159,
1529
+ "rewards/accuracies": 0.762499988079071,
1530
+ "rewards/chosen": -0.7020602822303772,
1531
+ "rewards/margins": 0.7331494092941284,
1532
+ "rewards/rejected": -1.4352096319198608,
1533
+ "step": 910
1534
+ },
1535
+ {
1536
+ "epoch": 0.9628466771323915,
1537
+ "grad_norm": 15.497923846134174,
1538
+ "learning_rate": 2.0453443778310766e-09,
1539
+ "logits/chosen": 0.021750714629888535,
1540
+ "logits/rejected": 0.3918168544769287,
1541
+ "logps/chosen": -381.59539794921875,
1542
+ "logps/rejected": -412.8221130371094,
1543
+ "loss": 0.5177,
1544
+ "rewards/accuracies": 0.7875000238418579,
1545
+ "rewards/chosen": -0.6423409581184387,
1546
+ "rewards/margins": 0.835128903388977,
1547
+ "rewards/rejected": -1.477469801902771,
1548
+ "step": 920
1549
+ },
1550
+ {
1551
+ "epoch": 0.9733124018838305,
1552
+ "grad_norm": 12.503315786051827,
1553
+ "learning_rate": 1.0442413283435758e-09,
1554
+ "logits/chosen": -0.060670822858810425,
1555
+ "logits/rejected": 0.328230619430542,
1556
+ "logps/chosen": -422.365234375,
1557
+ "logps/rejected": -407.7621765136719,
1558
+ "loss": 0.4789,
1559
+ "rewards/accuracies": 0.6875,
1560
+ "rewards/chosen": -0.6927906274795532,
1561
+ "rewards/margins": 0.6564691662788391,
1562
+ "rewards/rejected": -1.3492597341537476,
1563
+ "step": 930
1564
+ },
1565
+ {
1566
+ "epoch": 0.9837781266352695,
1567
+ "grad_norm": 12.580335513343133,
1568
+ "learning_rate": 3.760945397705828e-10,
1569
+ "logits/chosen": 0.11628331989049911,
1570
+ "logits/rejected": 0.15096750855445862,
1571
+ "logps/chosen": -334.9276428222656,
1572
+ "logps/rejected": -456.6903381347656,
1573
+ "loss": 0.5032,
1574
+ "rewards/accuracies": 0.6875,
1575
+ "rewards/chosen": -0.6851450800895691,
1576
+ "rewards/margins": 0.7425065040588379,
1577
+ "rewards/rejected": -1.4276516437530518,
1578
+ "step": 940
1579
+ },
1580
+ {
1581
+ "epoch": 0.9942438513867086,
1582
+ "grad_norm": 15.517565032289047,
1583
+ "learning_rate": 4.17975992204056e-11,
1584
+ "logits/chosen": -0.20424561202526093,
1585
+ "logits/rejected": 0.1298888772726059,
1586
+ "logps/chosen": -400.3074951171875,
1587
+ "logps/rejected": -450.6595153808594,
1588
+ "loss": 0.5105,
1589
+ "rewards/accuracies": 0.75,
1590
+ "rewards/chosen": -0.7332125306129456,
1591
+ "rewards/margins": 0.8044403195381165,
1592
+ "rewards/rejected": -1.537652850151062,
1593
+ "step": 950
1594
+ },
1595
+ {
1596
+ "epoch": 0.9994767137624281,
1597
+ "step": 955,
1598
+ "total_flos": 0.0,
1599
+ "train_loss": 0.5504645167845081,
1600
+ "train_runtime": 19044.9678,
1601
+ "train_samples_per_second": 3.21,
1602
+ "train_steps_per_second": 0.05
1603
+ }
1604
+ ],
1605
+ "logging_steps": 10,
1606
+ "max_steps": 955,
1607
+ "num_input_tokens_seen": 0,
1608
+ "num_train_epochs": 1,
1609
+ "save_steps": 100,
1610
+ "stateful_callbacks": {
1611
+ "TrainerControl": {
1612
+ "args": {
1613
+ "should_epoch_stop": false,
1614
+ "should_evaluate": false,
1615
+ "should_log": false,
1616
+ "should_save": true,
1617
+ "should_training_stop": true
1618
+ },
1619
+ "attributes": {}
1620
+ }
1621
+ },
1622
+ "total_flos": 0.0,
1623
+ "train_batch_size": 4,
1624
+ "trial_name": null,
1625
+ "trial_params": null
1626
+ }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:94721bd7a3a60be501d2cdf2d7d477d96f66b0c2bf18c254825d33f0c508ddf2
3
  size 7608
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fae8eb2d0bd0b9b8c23b074d41f71a59bccbb31a96063eb67527f8c6659d7db1
3
  size 7608