{
  "best_metric": 0.4411565661430359,
  "best_model_checkpoint": "autotrain-3c51k-w49bn/checkpoint-69",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 69,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 3.44027042388916,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 0.7869,
      "step": 4
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 1.9074159860610962,
      "learning_rate": 2.9516129032258067e-05,
      "loss": 0.7022,
      "step": 8
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 3.345827579498291,
      "learning_rate": 2.758064516129032e-05,
      "loss": 0.6482,
      "step": 12
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 2.091654062271118,
      "learning_rate": 2.5645161290322582e-05,
      "loss": 0.5571,
      "step": 16
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 1.436171054840088,
      "learning_rate": 2.370967741935484e-05,
      "loss": 0.5698,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.5249526500701904,
      "eval_runtime": 3.0089,
      "eval_samples_per_second": 60.487,
      "eval_steps_per_second": 3.988,
      "step": 23
    },
    {
      "epoch": 1.0434782608695652,
      "grad_norm": 1.838670015335083,
      "learning_rate": 2.1774193548387097e-05,
      "loss": 0.4771,
      "step": 24
    },
    {
      "epoch": 1.2173913043478262,
      "grad_norm": 1.2238214015960693,
      "learning_rate": 1.9838709677419355e-05,
      "loss": 0.444,
      "step": 28
    },
    {
      "epoch": 1.391304347826087,
      "grad_norm": 2.102836847305298,
      "learning_rate": 1.7903225806451616e-05,
      "loss": 0.6149,
      "step": 32
    },
    {
      "epoch": 1.5652173913043477,
      "grad_norm": 1.8485645055770874,
      "learning_rate": 1.596774193548387e-05,
      "loss": 0.5523,
      "step": 36
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 2.7359752655029297,
      "learning_rate": 1.403225806451613e-05,
      "loss": 0.4806,
      "step": 40
    },
    {
      "epoch": 1.9130434782608696,
      "grad_norm": 1.4217264652252197,
      "learning_rate": 1.2096774193548387e-05,
      "loss": 0.4623,
      "step": 44
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.46536698937416077,
      "eval_runtime": 3.1677,
      "eval_samples_per_second": 57.456,
      "eval_steps_per_second": 3.788,
      "step": 46
    },
    {
      "epoch": 2.0869565217391304,
      "grad_norm": 1.995253562927246,
      "learning_rate": 1.0161290322580644e-05,
      "loss": 0.4039,
      "step": 48
    },
    {
      "epoch": 2.260869565217391,
      "grad_norm": 2.4988834857940674,
      "learning_rate": 8.225806451612904e-06,
      "loss": 0.47,
      "step": 52
    },
    {
      "epoch": 2.4347826086956523,
      "grad_norm": 1.7685775756835938,
      "learning_rate": 6.290322580645162e-06,
      "loss": 0.3878,
      "step": 56
    },
    {
      "epoch": 2.608695652173913,
      "grad_norm": 2.7405200004577637,
      "learning_rate": 4.35483870967742e-06,
      "loss": 0.5158,
      "step": 60
    },
    {
      "epoch": 2.782608695652174,
      "grad_norm": 3.4083738327026367,
      "learning_rate": 2.4193548387096776e-06,
      "loss": 0.5203,
      "step": 64
    },
    {
      "epoch": 2.9565217391304346,
      "grad_norm": 1.6851589679718018,
      "learning_rate": 4.838709677419355e-07,
      "loss": 0.4446,
      "step": 68
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.4411565661430359,
      "eval_runtime": 1.7496,
      "eval_samples_per_second": 104.022,
      "eval_steps_per_second": 6.859,
      "step": 69
    }
  ],
  "logging_steps": 4,
  "max_steps": 69,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}