{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.008678389724786566,
  "eval_steps": 100,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.1695974311966415e-05,
      "eval_loss": 11.935809135437012,
      "eval_runtime": 293.8099,
      "eval_samples_per_second": 66.053,
      "eval_steps_per_second": 33.028,
      "step": 1
    },
    {
      "epoch": 0.00010847987155983208,
      "grad_norm": 0.15566453337669373,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 11.9341,
      "step": 5
    },
    {
      "epoch": 0.00021695974311966416,
      "grad_norm": 0.13513855636119843,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 11.9339,
      "step": 10
    },
    {
      "epoch": 0.0003254396146794962,
      "grad_norm": 0.12842746078968048,
      "learning_rate": 5e-05,
      "loss": 11.937,
      "step": 15
    },
    {
      "epoch": 0.0004339194862393283,
      "grad_norm": 0.17119921743869781,
      "learning_rate": 6.666666666666667e-05,
      "loss": 11.9367,
      "step": 20
    },
    {
      "epoch": 0.0005423993577991604,
      "grad_norm": 0.20038500428199768,
      "learning_rate": 8.333333333333334e-05,
      "loss": 11.9327,
      "step": 25
    },
    {
      "epoch": 0.0006508792293589925,
      "grad_norm": 0.17738866806030273,
      "learning_rate": 0.0001,
      "loss": 11.9328,
      "step": 30
    },
    {
      "epoch": 0.0007593591009188245,
      "grad_norm": 0.141236811876297,
      "learning_rate": 9.995494831023409e-05,
      "loss": 11.931,
      "step": 35
    },
    {
      "epoch": 0.0008678389724786566,
      "grad_norm": 0.2033369392156601,
      "learning_rate": 9.981987442712633e-05,
      "loss": 11.933,
      "step": 40
    },
    {
      "epoch": 0.0009763188440384886,
      "grad_norm": 0.1696261763572693,
      "learning_rate": 9.959502176294383e-05,
      "loss": 11.9278,
      "step": 45
    },
    {
      "epoch": 0.0010847987155983207,
      "grad_norm": 0.24034294486045837,
      "learning_rate": 9.928079551738543e-05,
      "loss": 11.9221,
      "step": 50
    },
    {
      "epoch": 0.0011932785871581527,
      "grad_norm": 0.21366427838802338,
      "learning_rate": 9.887776194738432e-05,
      "loss": 11.9258,
      "step": 55
    },
    {
      "epoch": 0.001301758458717985,
      "grad_norm": 0.2040693759918213,
      "learning_rate": 9.838664734667495e-05,
      "loss": 11.9222,
      "step": 60
    },
    {
      "epoch": 0.0014102383302778169,
      "grad_norm": 0.25024330615997314,
      "learning_rate": 9.780833673696254e-05,
      "loss": 11.921,
      "step": 65
    },
    {
      "epoch": 0.001518718201837649,
      "grad_norm": 0.2731180787086487,
      "learning_rate": 9.714387227305422e-05,
      "loss": 11.912,
      "step": 70
    },
    {
      "epoch": 0.001627198073397481,
      "grad_norm": 0.26710769534111023,
      "learning_rate": 9.639445136482548e-05,
      "loss": 11.9104,
      "step": 75
    },
    {
      "epoch": 0.0017356779449573133,
      "grad_norm": 0.28881633281707764,
      "learning_rate": 9.55614245194068e-05,
      "loss": 11.9022,
      "step": 80
    },
    {
      "epoch": 0.0018441578165171453,
      "grad_norm": 0.3406215012073517,
      "learning_rate": 9.464629290747842e-05,
      "loss": 11.8949,
      "step": 85
    },
    {
      "epoch": 0.0019526376880769772,
      "grad_norm": 0.40581774711608887,
      "learning_rate": 9.365070565805941e-05,
      "loss": 11.8852,
      "step": 90
    },
    {
      "epoch": 0.0020611175596368094,
      "grad_norm": 0.35484281182289124,
      "learning_rate": 9.257645688666556e-05,
      "loss": 11.8833,
      "step": 95
    },
    {
      "epoch": 0.0021695974311966414,
      "grad_norm": 0.30402904748916626,
      "learning_rate": 9.142548246219212e-05,
      "loss": 11.8834,
      "step": 100
    },
    {
      "epoch": 0.0021695974311966414,
      "eval_loss": 11.879146575927734,
      "eval_runtime": 294.6625,
      "eval_samples_per_second": 65.862,
      "eval_steps_per_second": 32.933,
      "step": 100
    },
    {
      "epoch": 0.0022780773027564734,
      "grad_norm": 0.28309234976768494,
      "learning_rate": 9.019985651834703e-05,
      "loss": 11.8862,
      "step": 105
    },
    {
      "epoch": 0.0023865571743163054,
      "grad_norm": 0.2482827603816986,
      "learning_rate": 8.890178771592199e-05,
      "loss": 11.8759,
      "step": 110
    },
    {
      "epoch": 0.002495037045876138,
      "grad_norm": 0.3396661877632141,
      "learning_rate": 8.753361526263621e-05,
      "loss": 11.8619,
      "step": 115
    },
    {
      "epoch": 0.00260351691743597,
      "grad_norm": 0.29237067699432373,
      "learning_rate": 8.609780469772623e-05,
      "loss": 11.8653,
      "step": 120
    },
    {
      "epoch": 0.002711996788995802,
      "grad_norm": 0.2691107988357544,
      "learning_rate": 8.459694344887732e-05,
      "loss": 11.8656,
      "step": 125
    },
    {
      "epoch": 0.0028204766605556338,
      "grad_norm": 0.2159227430820465,
      "learning_rate": 8.303373616950408e-05,
      "loss": 11.8707,
      "step": 130
    },
    {
      "epoch": 0.0029289565321154658,
      "grad_norm": 0.29222938418388367,
      "learning_rate": 8.141099986478212e-05,
      "loss": 11.8527,
      "step": 135
    },
    {
      "epoch": 0.003037436403675298,
      "grad_norm": 0.2999395430088043,
      "learning_rate": 7.973165881521434e-05,
      "loss": 11.8495,
      "step": 140
    },
    {
      "epoch": 0.00314591627523513,
      "grad_norm": 0.18667206168174744,
      "learning_rate": 7.799873930687978e-05,
      "loss": 11.8491,
      "step": 145
    },
    {
      "epoch": 0.003254396146794962,
      "grad_norm": 0.21483804285526276,
      "learning_rate": 7.621536417786159e-05,
      "loss": 11.858,
      "step": 150
    },
    {
      "epoch": 0.003362876018354794,
      "grad_norm": 0.2531699240207672,
      "learning_rate": 7.438474719068173e-05,
      "loss": 11.8516,
      "step": 155
    },
    {
      "epoch": 0.0034713558899146266,
      "grad_norm": 0.23763595521450043,
      "learning_rate": 7.251018724088367e-05,
      "loss": 11.8592,
      "step": 160
    },
    {
      "epoch": 0.0035798357614744585,
      "grad_norm": 0.15770548582077026,
      "learning_rate": 7.059506241219965e-05,
      "loss": 11.8438,
      "step": 165
    },
    {
      "epoch": 0.0036883156330342905,
      "grad_norm": 0.1576322615146637,
      "learning_rate": 6.864282388901544e-05,
      "loss": 11.8531,
      "step": 170
    },
    {
      "epoch": 0.0037967955045941225,
      "grad_norm": 0.18687373399734497,
      "learning_rate": 6.665698973710288e-05,
      "loss": 11.8449,
      "step": 175
    },
    {
      "epoch": 0.0039052753761539545,
      "grad_norm": 0.21958865225315094,
      "learning_rate": 6.464113856382752e-05,
      "loss": 11.8447,
      "step": 180
    },
    {
      "epoch": 0.004013755247713787,
      "grad_norm": 0.1973612904548645,
      "learning_rate": 6.259890306925627e-05,
      "loss": 11.8371,
      "step": 185
    },
    {
      "epoch": 0.004122235119273619,
      "grad_norm": 0.19669975340366364,
      "learning_rate": 6.0533963499786314e-05,
      "loss": 11.8471,
      "step": 190
    },
    {
      "epoch": 0.004230714990833451,
      "grad_norm": 0.18256022036075592,
      "learning_rate": 5.8450041016092464e-05,
      "loss": 11.8292,
      "step": 195
    },
    {
      "epoch": 0.004339194862393283,
      "grad_norm": 0.2624127268791199,
      "learning_rate": 5.6350890987343944e-05,
      "loss": 11.8375,
      "step": 200
    },
    {
      "epoch": 0.004339194862393283,
      "eval_loss": 11.839664459228516,
      "eval_runtime": 295.3522,
      "eval_samples_per_second": 65.708,
      "eval_steps_per_second": 32.856,
      "step": 200
    },
    {
      "epoch": 0.004447674733953115,
      "grad_norm": 0.1508708894252777,
      "learning_rate": 5.4240296223775465e-05,
      "loss": 11.8484,
      "step": 205
    },
    {
      "epoch": 0.004556154605512947,
      "grad_norm": 0.13817380368709564,
      "learning_rate": 5.212206015980742e-05,
      "loss": 11.8426,
      "step": 210
    },
    {
      "epoch": 0.004664634477072779,
      "grad_norm": 0.14686021208763123,
      "learning_rate": 5e-05,
      "loss": 11.8489,
      "step": 215
    },
    {
      "epoch": 0.004773114348632611,
      "grad_norm": 0.15063568949699402,
      "learning_rate": 4.78779398401926e-05,
      "loss": 11.8389,
      "step": 220
    },
    {
      "epoch": 0.004881594220192444,
      "grad_norm": 0.16536208987236023,
      "learning_rate": 4.575970377622456e-05,
      "loss": 11.842,
      "step": 225
    },
    {
      "epoch": 0.004990074091752276,
      "grad_norm": 0.1842128187417984,
      "learning_rate": 4.364910901265606e-05,
      "loss": 11.8426,
      "step": 230
    },
    {
      "epoch": 0.005098553963312108,
      "grad_norm": 0.1707184910774231,
      "learning_rate": 4.1549958983907555e-05,
      "loss": 11.8401,
      "step": 235
    },
    {
      "epoch": 0.00520703383487194,
      "grad_norm": 0.17228449881076813,
      "learning_rate": 3.94660365002137e-05,
      "loss": 11.8237,
      "step": 240
    },
    {
      "epoch": 0.005315513706431772,
      "grad_norm": 0.18822325766086578,
      "learning_rate": 3.740109693074375e-05,
      "loss": 11.8214,
      "step": 245
    },
    {
      "epoch": 0.005423993577991604,
      "grad_norm": 0.15260934829711914,
      "learning_rate": 3.5358861436172485e-05,
      "loss": 11.8264,
      "step": 250
    },
    {
      "epoch": 0.0055324734495514356,
      "grad_norm": 0.10711085051298141,
      "learning_rate": 3.334301026289712e-05,
      "loss": 11.8456,
      "step": 255
    },
    {
      "epoch": 0.0056409533211112676,
      "grad_norm": 0.14801694452762604,
      "learning_rate": 3.135717611098458e-05,
      "loss": 11.837,
      "step": 260
    },
    {
      "epoch": 0.0057494331926710995,
      "grad_norm": 0.10283401608467102,
      "learning_rate": 2.9404937587800375e-05,
      "loss": 11.8411,
      "step": 265
    },
    {
      "epoch": 0.0058579130642309315,
      "grad_norm": 0.11635620892047882,
      "learning_rate": 2.748981275911633e-05,
      "loss": 11.8293,
      "step": 270
    },
    {
      "epoch": 0.005966392935790764,
      "grad_norm": 0.10565590113401413,
      "learning_rate": 2.5615252809318284e-05,
      "loss": 11.8305,
      "step": 275
    },
    {
      "epoch": 0.006074872807350596,
      "grad_norm": 0.10437976568937302,
      "learning_rate": 2.3784635822138424e-05,
      "loss": 11.8304,
      "step": 280
    },
    {
      "epoch": 0.006183352678910428,
      "grad_norm": 0.16932187974452972,
      "learning_rate": 2.2001260693120233e-05,
      "loss": 11.828,
      "step": 285
    },
    {
      "epoch": 0.00629183255047026,
      "grad_norm": 0.22562319040298462,
      "learning_rate": 2.026834118478567e-05,
      "loss": 11.8261,
      "step": 290
    },
    {
      "epoch": 0.006400312422030092,
      "grad_norm": 0.1617041528224945,
      "learning_rate": 1.858900013521788e-05,
      "loss": 11.8353,
      "step": 295
    },
    {
      "epoch": 0.006508792293589924,
      "grad_norm": 0.22705136239528656,
      "learning_rate": 1.6966263830495936e-05,
      "loss": 11.8313,
      "step": 300
    },
    {
      "epoch": 0.006508792293589924,
      "eval_loss": 11.834197998046875,
      "eval_runtime": 296.4704,
      "eval_samples_per_second": 65.46,
      "eval_steps_per_second": 32.732,
      "step": 300
    },
    {
      "epoch": 0.006617272165149756,
      "grad_norm": 0.19286486506462097,
      "learning_rate": 1.5403056551122697e-05,
      "loss": 11.8476,
      "step": 305
    },
    {
      "epoch": 0.006725752036709588,
      "grad_norm": 0.12702083587646484,
      "learning_rate": 1.3902195302273779e-05,
      "loss": 11.8422,
      "step": 310
    },
    {
      "epoch": 0.00683423190826942,
      "grad_norm": 0.11417343467473984,
      "learning_rate": 1.246638473736378e-05,
      "loss": 11.8398,
      "step": 315
    },
    {
      "epoch": 0.006942711779829253,
      "grad_norm": 0.10566074401140213,
      "learning_rate": 1.1098212284078036e-05,
      "loss": 11.8339,
      "step": 320
    },
    {
      "epoch": 0.007051191651389085,
      "grad_norm": 0.12960262596607208,
      "learning_rate": 9.800143481652979e-06,
      "loss": 11.8344,
      "step": 325
    },
    {
      "epoch": 0.007159671522948917,
      "grad_norm": 0.11917758733034134,
      "learning_rate": 8.574517537807897e-06,
      "loss": 11.8281,
      "step": 330
    },
    {
      "epoch": 0.007268151394508749,
      "grad_norm": 0.18406090140342712,
      "learning_rate": 7.423543113334436e-06,
      "loss": 11.8358,
      "step": 335
    },
    {
      "epoch": 0.007376631266068581,
      "grad_norm": 0.1254497766494751,
      "learning_rate": 6.349294341940593e-06,
      "loss": 11.8402,
      "step": 340
    },
    {
      "epoch": 0.007485111137628413,
      "grad_norm": 0.19740301370620728,
      "learning_rate": 5.353707092521582e-06,
      "loss": 11.8325,
      "step": 345
    },
    {
      "epoch": 0.007593591009188245,
      "grad_norm": 0.28371068835258484,
      "learning_rate": 4.43857548059321e-06,
      "loss": 11.8434,
      "step": 350
    },
    {
      "epoch": 0.007702070880748077,
      "grad_norm": 0.11046874523162842,
      "learning_rate": 3.605548635174533e-06,
      "loss": 11.8349,
      "step": 355
    },
    {
      "epoch": 0.007810550752307909,
      "grad_norm": 0.14138200879096985,
      "learning_rate": 2.85612772694579e-06,
      "loss": 11.836,
      "step": 360
    },
    {
      "epoch": 0.007919030623867741,
      "grad_norm": 0.12249435484409332,
      "learning_rate": 2.191663263037458e-06,
      "loss": 11.8327,
      "step": 365
    },
    {
      "epoch": 0.008027510495427574,
      "grad_norm": 0.08023572713136673,
      "learning_rate": 1.6133526533250565e-06,
      "loss": 11.8353,
      "step": 370
    },
    {
      "epoch": 0.008135990366987405,
      "grad_norm": 0.12616612017154694,
      "learning_rate": 1.1222380526156928e-06,
      "loss": 11.8323,
      "step": 375
    },
    {
      "epoch": 0.008244470238547238,
      "grad_norm": 0.13970719277858734,
      "learning_rate": 7.192044826145771e-07,
      "loss": 11.8326,
      "step": 380
    },
    {
      "epoch": 0.008352950110107069,
      "grad_norm": 0.16991889476776123,
      "learning_rate": 4.049782370561583e-07,
      "loss": 11.8286,
      "step": 385
    },
    {
      "epoch": 0.008461429981666902,
      "grad_norm": 0.23203876614570618,
      "learning_rate": 1.8012557287367392e-07,
      "loss": 11.836,
      "step": 390
    },
    {
      "epoch": 0.008569909853226735,
      "grad_norm": 0.13499905169010162,
      "learning_rate": 4.5051689765929214e-08,
      "loss": 11.835,
      "step": 395
    },
    {
      "epoch": 0.008678389724786566,
      "grad_norm": 0.2364356517791748,
      "learning_rate": 0.0,
      "loss": 11.8392,
      "step": 400
    },
    {
      "epoch": 0.008678389724786566,
      "eval_loss": 11.83340835571289,
      "eval_runtime": 296.0254,
      "eval_samples_per_second": 65.559,
      "eval_steps_per_second": 32.781,
      "step": 400
    }
  ],
  "logging_steps": 5,
  "max_steps": 400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 100187325333504.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}