{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 5000.0,
  "global_step": 741,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01349527665317139,
      "grad_norm": 7.016904524087463,
      "learning_rate": 8.695652173913043e-07,
      "loss": 1.1369,
      "step": 10
    },
    {
      "epoch": 0.02699055330634278,
      "grad_norm": 4.2020279077827905,
      "learning_rate": 1.7391304347826085e-06,
      "loss": 1.0452,
      "step": 20
    },
    {
      "epoch": 0.04048582995951417,
      "grad_norm": 3.441796090298099,
      "learning_rate": 1.999530989041473e-06,
      "loss": 0.9533,
      "step": 30
    },
    {
      "epoch": 0.05398110661268556,
      "grad_norm": 3.5191614427600753,
      "learning_rate": 1.9972348515341017e-06,
      "loss": 0.8914,
      "step": 40
    },
    {
      "epoch": 0.06747638326585695,
      "grad_norm": 2.9690875149374367,
      "learning_rate": 1.9930298323185945e-06,
      "loss": 0.8475,
      "step": 50
    },
    {
      "epoch": 0.08097165991902834,
      "grad_norm": 3.0556169559708475,
      "learning_rate": 1.986923980536286e-06,
      "loss": 0.8232,
      "step": 60
    },
    {
      "epoch": 0.09446693657219973,
      "grad_norm": 2.9704481548374653,
      "learning_rate": 1.9789289838540896e-06,
      "loss": 0.822,
      "step": 70
    },
    {
      "epoch": 0.10796221322537113,
      "grad_norm": 2.999043629121677,
      "learning_rate": 1.969060146092264e-06,
      "loss": 0.791,
      "step": 80
    },
    {
      "epoch": 0.1214574898785425,
      "grad_norm": 3.17575389006719,
      "learning_rate": 1.9573363579302263e-06,
      "loss": 0.806,
      "step": 90
    },
    {
      "epoch": 0.1349527665317139,
      "grad_norm": 2.9165449315999563,
      "learning_rate": 1.943780060746493e-06,
      "loss": 0.7961,
      "step": 100
    },
    {
      "epoch": 0.1484480431848853,
      "grad_norm": 2.952637325540401,
      "learning_rate": 1.928417203661959e-06,
      "loss": 0.7962,
      "step": 110
    },
    {
      "epoch": 0.16194331983805668,
      "grad_norm": 3.014781591451641,
      "learning_rate": 1.911277193868751e-06,
      "loss": 0.7781,
      "step": 120
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 2.8796052156819716,
      "learning_rate": 1.8923928403397207e-06,
      "loss": 0.776,
      "step": 130
    },
    {
      "epoch": 0.18893387314439947,
      "grad_norm": 2.8482523150497117,
      "learning_rate": 1.8718002910263424e-06,
      "loss": 0.7615,
      "step": 140
    },
    {
      "epoch": 0.20242914979757085,
      "grad_norm": 2.922903791793236,
      "learning_rate": 1.8495389636652184e-06,
      "loss": 0.7543,
      "step": 150
    },
    {
      "epoch": 0.21592442645074225,
      "grad_norm": 2.9536979340238547,
      "learning_rate": 1.8256514703256447e-06,
      "loss": 0.7598,
      "step": 160
    },
    {
      "epoch": 0.22941970310391363,
      "grad_norm": 3.1334920821882126,
      "learning_rate": 1.8001835358426684e-06,
      "loss": 0.7489,
      "step": 170
    },
    {
      "epoch": 0.242914979757085,
      "grad_norm": 3.2092630967068683,
      "learning_rate": 1.7731839102917642e-06,
      "loss": 0.7551,
      "step": 180
    },
    {
      "epoch": 0.2564102564102564,
      "grad_norm": 2.987735102823792,
      "learning_rate": 1.7447042756726754e-06,
      "loss": 0.7503,
      "step": 190
    },
    {
      "epoch": 0.2699055330634278,
      "grad_norm": 2.979147030983217,
      "learning_rate": 1.7147991469810365e-06,
      "loss": 0.7528,
      "step": 200
    },
    {
      "epoch": 0.2834008097165992,
      "grad_norm": 3.1413426729820904,
      "learning_rate": 1.6835257678571512e-06,
      "loss": 0.7387,
      "step": 210
    },
    {
      "epoch": 0.2968960863697706,
      "grad_norm": 3.1214265634728897,
      "learning_rate": 1.650944001011663e-06,
      "loss": 0.7281,
      "step": 220
    },
    {
      "epoch": 0.31039136302294196,
      "grad_norm": 2.978380133818548,
      "learning_rate": 1.6171162136378713e-06,
      "loss": 0.7514,
      "step": 230
    },
    {
      "epoch": 0.32388663967611336,
      "grad_norm": 2.85376146602686,
      "learning_rate": 1.5821071580300269e-06,
      "loss": 0.7403,
      "step": 240
    },
    {
      "epoch": 0.33738191632928477,
      "grad_norm": 3.1598761470719734,
      "learning_rate": 1.5459838476361322e-06,
      "loss": 0.7435,
      "step": 250
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 2.821356522721463,
      "learning_rate": 1.5088154287824932e-06,
      "loss": 0.7209,
      "step": 260
    },
    {
      "epoch": 0.3643724696356275,
      "grad_norm": 2.98057980294587,
      "learning_rate": 1.4706730483155736e-06,
      "loss": 0.7311,
      "step": 270
    },
    {
      "epoch": 0.37786774628879893,
      "grad_norm": 3.0364233244848364,
      "learning_rate": 1.4316297174145016e-06,
      "loss": 0.7282,
      "step": 280
    },
    {
      "epoch": 0.3913630229419703,
      "grad_norm": 2.949886690990857,
      "learning_rate": 1.391760171834918e-06,
      "loss": 0.7078,
      "step": 290
    },
    {
      "epoch": 0.4048582995951417,
      "grad_norm": 2.9507364249680053,
      "learning_rate": 1.3511407288516878e-06,
      "loss": 0.7201,
      "step": 300
    },
    {
      "epoch": 0.4183535762483131,
      "grad_norm": 2.827650772154331,
      "learning_rate": 1.3098491411743014e-06,
      "loss": 0.7147,
      "step": 310
    },
    {
      "epoch": 0.4318488529014845,
      "grad_norm": 2.8781557252440635,
      "learning_rate": 1.267964448114608e-06,
      "loss": 0.7295,
      "step": 320
    },
    {
      "epoch": 0.44534412955465585,
      "grad_norm": 2.952298995368144,
      "learning_rate": 1.2255668242917648e-06,
      "loss": 0.724,
      "step": 330
    },
    {
      "epoch": 0.45883940620782726,
      "grad_norm": 3.121925726144687,
      "learning_rate": 1.1827374261640126e-06,
      "loss": 0.697,
      "step": 340
    },
    {
      "epoch": 0.47233468286099867,
      "grad_norm": 2.852801476396792,
      "learning_rate": 1.1395582366810346e-06,
      "loss": 0.718,
      "step": 350
    },
    {
      "epoch": 0.48582995951417,
      "grad_norm": 3.0303857364861955,
      "learning_rate": 1.0961119083542726e-06,
      "loss": 0.7197,
      "step": 360
    },
    {
      "epoch": 0.4993252361673414,
      "grad_norm": 3.166667043296761,
      "learning_rate": 1.05248160504558e-06,
      "loss": 0.7091,
      "step": 370
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 2.906467539598673,
      "learning_rate": 1.0087508427770638e-06,
      "loss": 0.6934,
      "step": 380
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 3.02782546633098,
      "learning_rate": 9.650033298668279e-07,
      "loss": 0.7025,
      "step": 390
    },
    {
      "epoch": 0.5398110661268556,
      "grad_norm": 2.8912523592077637,
      "learning_rate": 9.213228066966326e-07,
      "loss": 0.7033,
      "step": 400
    },
    {
      "epoch": 0.553306342780027,
      "grad_norm": 2.82347772754047,
      "learning_rate": 8.777928854181709e-07,
      "loss": 0.7,
      "step": 410
    },
    {
      "epoch": 0.5668016194331984,
      "grad_norm": 2.9229669660658293,
      "learning_rate": 8.344968899048091e-07,
      "loss": 0.7022,
      "step": 420
    },
    {
      "epoch": 0.5802968960863698,
      "grad_norm": 3.1576009691196707,
      "learning_rate": 7.915176962551347e-07,
      "loss": 0.7037,
      "step": 430
    },
    {
      "epoch": 0.5937921727395412,
      "grad_norm": 3.1084612640931173,
      "learning_rate": 7.489375741536281e-07,
      "loss": 0.7073,
      "step": 440
    },
    {
      "epoch": 0.6072874493927125,
      "grad_norm": 2.9866909336253595,
      "learning_rate": 7.068380293921141e-07,
      "loss": 0.6877,
      "step": 450
    },
    {
      "epoch": 0.6207827260458839,
      "grad_norm": 2.8444556383038053,
      "learning_rate": 6.652996478534394e-07,
      "loss": 0.6996,
      "step": 460
    },
    {
      "epoch": 0.6342780026990553,
      "grad_norm": 3.073223609545424,
      "learning_rate": 6.244019412560143e-07,
      "loss": 0.6941,
      "step": 470
    },
    {
      "epoch": 0.6477732793522267,
      "grad_norm": 2.8831703732503753,
      "learning_rate": 5.842231949544962e-07,
      "loss": 0.7049,
      "step": 480
    },
    {
      "epoch": 0.6612685560053981,
      "grad_norm": 3.060533732204664,
      "learning_rate": 5.448403180879439e-07,
      "loss": 0.679,
      "step": 490
    },
    {
      "epoch": 0.6747638326585695,
      "grad_norm": 2.9803982323010585,
      "learning_rate": 5.063286963622902e-07,
      "loss": 0.6972,
      "step": 500
    },
    {
      "epoch": 0.6882591093117408,
      "grad_norm": 2.8648873659535794,
      "learning_rate": 4.687620477489337e-07,
      "loss": 0.6894,
      "step": 510
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 2.8594131054352054,
      "learning_rate": 4.3221228137566223e-07,
      "loss": 0.6984,
      "step": 520
    },
    {
      "epoch": 0.7152496626180836,
      "grad_norm": 2.826483265916211,
      "learning_rate": 3.9674935988002325e-07,
      "loss": 0.6806,
      "step": 530
    },
    {
      "epoch": 0.728744939271255,
      "grad_norm": 3.0466631136136875,
      "learning_rate": 3.624411654886108e-07,
      "loss": 0.7028,
      "step": 540
    },
    {
      "epoch": 0.7422402159244265,
      "grad_norm": 2.8395840111896806,
      "learning_rate": 3.293533700786286e-07,
      "loss": 0.6966,
      "step": 550
    },
    {
      "epoch": 0.7557354925775979,
      "grad_norm": 2.9054816392819154,
      "learning_rate": 2.975493094704435e-07,
      "loss": 0.6908,
      "step": 560
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 2.9460246471571345,
      "learning_rate": 2.670898621917629e-07,
      "loss": 0.6947,
      "step": 570
    },
    {
      "epoch": 0.7827260458839406,
      "grad_norm": 2.941971675288718,
      "learning_rate": 2.3803333294549644e-07,
      "loss": 0.6795,
      "step": 580
    },
    {
      "epoch": 0.796221322537112,
      "grad_norm": 3.0680540747802527,
      "learning_rate": 2.104353410043712e-07,
      "loss": 0.6743,
      "step": 590
    },
    {
      "epoch": 0.8097165991902834,
      "grad_norm": 3.1007189679330374,
      "learning_rate": 1.843487137459261e-07,
      "loss": 0.6941,
      "step": 600
    },
    {
      "epoch": 0.8232118758434548,
      "grad_norm": 2.8712948349597625,
      "learning_rate": 1.598233855316856e-07,
      "loss": 0.6827,
      "step": 610
    },
    {
      "epoch": 0.8367071524966262,
      "grad_norm": 3.028709196939323,
      "learning_rate": 1.369063021240665e-07,
      "loss": 0.7058,
      "step": 620
    },
    {
      "epoch": 0.8502024291497976,
      "grad_norm": 2.8663884949478655,
      "learning_rate": 1.1564133082398942e-07,
      "loss": 0.6926,
      "step": 630
    },
    {
      "epoch": 0.863697705802969,
      "grad_norm": 2.7965685313183983,
      "learning_rate": 9.606917650120083e-08,
      "loss": 0.6902,
      "step": 640
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 2.9702187500416426,
      "learning_rate": 7.822730367804331e-08,
      "loss": 0.6809,
      "step": 650
    },
    {
      "epoch": 0.8906882591093117,
      "grad_norm": 2.9740711603347534,
      "learning_rate": 6.214986481581364e-08,
      "loss": 0.6846,
      "step": 660
    },
    {
      "epoch": 0.9041835357624831,
      "grad_norm": 3.108653359820244,
      "learning_rate": 4.786763494098689e-08,
      "loss": 0.6845,
      "step": 670
    },
    {
      "epoch": 0.9176788124156545,
      "grad_norm": 3.0730509775220005,
      "learning_rate": 3.540795273643926e-08,
      "loss": 0.6942,
      "step": 680
    },
    {
      "epoch": 0.9311740890688259,
      "grad_norm": 2.8851954067191885,
      "learning_rate": 2.479466821043419e-08,
      "loss": 0.6858,
      "step": 690
    },
    {
      "epoch": 0.9446693657219973,
      "grad_norm": 2.7753430457192816,
      "learning_rate": 1.604809704353949e-08,
      "loss": 0.6911,
      "step": 700
    },
    {
      "epoch": 0.9581646423751687,
      "grad_norm": 2.894348481910036,
      "learning_rate": 9.184981700866346e-09,
      "loss": 0.6892,
      "step": 710
    },
    {
      "epoch": 0.97165991902834,
      "grad_norm": 2.9242281390477505,
      "learning_rate": 4.218459384065953e-09,
      "loss": 0.6878,
      "step": 720
    },
    {
      "epoch": 0.9851551956815114,
      "grad_norm": 3.0382180715715568,
      "learning_rate": 1.1580368844316125e-09,
      "loss": 0.6897,
      "step": 730
    },
    {
      "epoch": 0.9986504723346828,
      "grad_norm": 3.0414366034886235,
      "learning_rate": 9.572385238243441e-12,
      "loss": 0.6865,
      "step": 740
    }
  ],
  "logging_steps": 10,
  "max_steps": 741,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 350,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.306076234857578e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}