{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025,
      "grad_norm": 6.6984829306295985,
      "learning_rate": 1.25e-07,
      "logits/chosen": -2.8582587242126465,
      "logits/rejected": -2.842068910598755,
      "logps/chosen": -261.8958435058594,
      "logps/rejected": -226.5897216796875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.25,
      "grad_norm": 6.880940565037714,
      "learning_rate": 4.6650635094610966e-07,
      "logits/chosen": -2.781381130218506,
      "logits/rejected": -2.7730088233947754,
      "logps/chosen": -254.4922637939453,
      "logps/rejected": -251.15196228027344,
      "loss": 0.6908,
      "rewards/accuracies": 0.5277777910232544,
      "rewards/chosen": 0.005487086251378059,
      "rewards/margins": 0.0038041824009269476,
      "rewards/rejected": 0.0016829040832817554,
      "step": 10
    },
    {
      "epoch": 0.5,
      "grad_norm": 6.415866483339294,
      "learning_rate": 2.934120444167326e-07,
      "logits/chosen": -2.771768569946289,
      "logits/rejected": -2.744568347930908,
      "logps/chosen": -265.42926025390625,
      "logps/rejected": -252.08932495117188,
      "loss": 0.6741,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.03352969139814377,
      "rewards/margins": 0.04392678663134575,
      "rewards/rejected": -0.010397094301879406,
      "step": 20
    },
    {
      "epoch": 0.75,
      "grad_norm": 7.499103181183479,
      "learning_rate": 8.930309757836516e-08,
      "logits/chosen": -2.7617874145507812,
      "logits/rejected": -2.7403435707092285,
      "logps/chosen": -258.38079833984375,
      "logps/rejected": -249.26309204101562,
      "loss": 0.6514,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.02342965081334114,
      "rewards/margins": 0.07397619634866714,
      "rewards/rejected": -0.09740584343671799,
      "step": 30
    },
    {
      "epoch": 1.0,
      "grad_norm": 7.140558621795842,
      "learning_rate": 0.0,
      "logits/chosen": -2.7583961486816406,
      "logits/rejected": -2.7331676483154297,
      "logps/chosen": -266.16204833984375,
      "logps/rejected": -273.2425537109375,
      "loss": 0.6467,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.05741094797849655,
      "rewards/margins": 0.062062572687864304,
      "rewards/rejected": -0.11947351694107056,
      "step": 40
    },
    {
      "epoch": 1.0,
      "step": 40,
      "total_flos": 0.0,
      "train_loss": 0.6658271312713623,
      "train_runtime": 1112.3976,
      "train_samples_per_second": 9.159,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 40,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}