{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.09328720799160416,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 0.47774845361709595,
      "learning_rate": 4.999970160815579e-05,
      "loss": 2.0765,
      "step": 5
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.6051416397094727,
      "learning_rate": 4.999880643974619e-05,
      "loss": 2.2297,
      "step": 10
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.6161717772483826,
      "learning_rate": 4.9997314516140056e-05,
      "loss": 2.1103,
      "step": 15
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.4686434268951416,
      "learning_rate": 4.999522587295162e-05,
      "loss": 2.0057,
      "step": 20
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8412289023399353,
      "learning_rate": 4.999254056003963e-05,
      "loss": 2.1778,
      "step": 25
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.5333625078201294,
      "learning_rate": 4.99892586415061e-05,
      "loss": 2.2399,
      "step": 30
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.821148157119751,
      "learning_rate": 4.9985380195694856e-05,
      "loss": 2.3215,
      "step": 35
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8403909206390381,
      "learning_rate": 4.998090531518962e-05,
      "loss": 1.8295,
      "step": 40
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6633398532867432,
      "learning_rate": 4.9975834106811834e-05,
      "loss": 2.0195,
      "step": 45
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6386868357658386,
      "learning_rate": 4.997016669161806e-05,
      "loss": 2.1257,
      "step": 50
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7762248516082764,
      "learning_rate": 4.996390320489715e-05,
      "loss": 2.057,
      "step": 55
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.3192856311798096,
      "learning_rate": 4.9957043796166966e-05,
      "loss": 2.0753,
      "step": 60
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.9797518849372864,
      "learning_rate": 4.994958862917083e-05,
      "loss": 1.9736,
      "step": 65
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.000693440437317,
      "learning_rate": 4.994153788187363e-05,
      "loss": 2.1572,
      "step": 70
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6852813959121704,
      "learning_rate": 4.993289174645757e-05,
      "loss": 2.1491,
      "step": 75
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.0075691938400269,
      "learning_rate": 4.992365042931752e-05,
      "loss": 1.945,
      "step": 80
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.1973133087158203,
      "learning_rate": 4.991381415105619e-05,
      "loss": 2.0811,
      "step": 85
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.9927239418029785,
      "learning_rate": 4.990338314647881e-05,
      "loss": 1.961,
      "step": 90
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.9499759674072266,
      "learning_rate": 4.98923576645875e-05,
      "loss": 2.0653,
      "step": 95
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.7233040928840637,
      "learning_rate": 4.9880737968575365e-05,
      "loss": 1.9999,
      "step": 100
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.55235755443573,
      "learning_rate": 4.986852433582022e-05,
      "loss": 2.2258,
      "step": 105
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.9007890820503235,
      "learning_rate": 4.985571705787793e-05,
      "loss": 2.1034,
      "step": 110
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.6774860620498657,
      "learning_rate": 4.9842316440475475e-05,
      "loss": 2.1753,
      "step": 115
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.7676737308502197,
      "learning_rate": 4.9828322803503665e-05,
      "loss": 2.1384,
      "step": 120
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.9624544978141785,
      "learning_rate": 4.981373648100946e-05,
      "loss": 2.0521,
      "step": 125
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.9315722584724426,
      "learning_rate": 4.979855782118802e-05,
      "loss": 1.9256,
      "step": 130
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.9035864472389221,
      "learning_rate": 4.978278718637443e-05,
      "loss": 2.0882,
      "step": 135
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.7997236251831055,
      "learning_rate": 4.9766424953035e-05,
      "loss": 2.0724,
      "step": 140
    },
    {
      "epoch": 0.05,
      "grad_norm": 1.0692921876907349,
      "learning_rate": 4.974947151175826e-05,
      "loss": 2.1329,
      "step": 145
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.9506180286407471,
      "learning_rate": 4.973192726724572e-05,
      "loss": 2.082,
      "step": 150
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.8647387027740479,
      "learning_rate": 4.9713792638302145e-05,
      "loss": 2.0366,
      "step": 155
    },
    {
      "epoch": 0.05,
      "grad_norm": 1.105302095413208,
      "learning_rate": 4.969506805782555e-05,
      "loss": 2.1481,
      "step": 160
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.7593303918838501,
      "learning_rate": 4.967575397279689e-05,
      "loss": 2.032,
      "step": 165
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.7521979808807373,
      "learning_rate": 4.965585084426943e-05,
      "loss": 2.0379,
      "step": 170
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.947120726108551,
      "learning_rate": 4.9635359147357655e-05,
      "loss": 2.1444,
      "step": 175
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.2184454202651978,
      "learning_rate": 4.961427937122598e-05,
      "loss": 1.9164,
      "step": 180
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.221663475036621,
      "learning_rate": 4.959261201907707e-05,
      "loss": 2.0084,
      "step": 185
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.0457361936569214,
      "learning_rate": 4.957035760813982e-05,
      "loss": 2.2032,
      "step": 190
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.8834909200668335,
      "learning_rate": 4.954751666965701e-05,
      "loss": 2.2101,
      "step": 195
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.791902482509613,
      "learning_rate": 4.9524089748872615e-05,
      "loss": 2.0472,
      "step": 200
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.2905739545822144,
      "learning_rate": 4.9500077405018807e-05,
      "loss": 2.0987,
      "step": 205
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.8612006306648254,
      "learning_rate": 4.9475480211302583e-05,
      "loss": 2.1765,
      "step": 210
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.3128459453582764,
      "learning_rate": 4.945029875489212e-05,
      "loss": 1.9926,
      "step": 215
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.9610918164253235,
      "learning_rate": 4.94245336369027e-05,
      "loss": 2.0124,
      "step": 220
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.873160183429718,
      "learning_rate": 4.939818547238241e-05,
      "loss": 2.2229,
      "step": 225
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.5535285472869873,
      "learning_rate": 4.9371254890297446e-05,
      "loss": 2.2013,
      "step": 230
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.1951836347579956,
      "learning_rate": 4.93437425335171e-05,
      "loss": 2.014,
      "step": 235
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.7874170541763306,
      "learning_rate": 4.9315649058798384e-05,
      "loss": 2.1701,
      "step": 240
    },
    {
      "epoch": 0.08,
      "grad_norm": 1.3503323793411255,
      "learning_rate": 4.928697513677042e-05,
      "loss": 2.1681,
      "step": 245
    },
    {
      "epoch": 0.08,
      "grad_norm": 1.3091179132461548,
      "learning_rate": 4.925772145191834e-05,
      "loss": 2.1224,
      "step": 250
    },
    {
      "epoch": 0.08,
      "grad_norm": 1.4428555965423584,
      "learning_rate": 4.9227888702567044e-05,
      "loss": 2.0512,
      "step": 255
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.8234395980834961,
      "learning_rate": 4.9197477600864446e-05,
      "loss": 2.1067,
      "step": 260
    },
    {
      "epoch": 0.08,
      "grad_norm": 1.9094969034194946,
      "learning_rate": 4.9166488872764526e-05,
      "loss": 1.8884,
      "step": 265
    },
    {
      "epoch": 0.08,
      "grad_norm": 1.0074087381362915,
      "learning_rate": 4.913492325800999e-05,
      "loss": 1.9345,
      "step": 270
    },
    {
      "epoch": 0.09,
      "grad_norm": 1.0867297649383545,
      "learning_rate": 4.910278151011458e-05,
      "loss": 2.1928,
      "step": 275
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.6842357516288757,
      "learning_rate": 4.907006439634516e-05,
      "loss": 2.0407,
      "step": 280
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.8409023284912109,
      "learning_rate": 4.903677269770329e-05,
      "loss": 2.2344,
      "step": 285
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.8119503259658813,
      "learning_rate": 4.900290720890671e-05,
      "loss": 2.1296,
      "step": 290
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.9938147068023682,
      "learning_rate": 4.8968468738370244e-05,
      "loss": 2.152,
      "step": 295
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.9865244030952454,
      "learning_rate": 4.8933458108186606e-05,
      "loss": 1.9623,
      "step": 300
    }
  ],
  "logging_steps": 5,
  "max_steps": 3215,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 4027137608908800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}