{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04366335552887239,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00021831677764436196,
      "grad_norm": 0.22467271983623505,
      "learning_rate": 2e-05,
      "loss": 0.8333,
      "step": 1
    },
    {
      "epoch": 0.00021831677764436196,
      "eval_loss": 0.9435501098632812,
      "eval_runtime": 183.8328,
      "eval_samples_per_second": 10.493,
      "eval_steps_per_second": 5.249,
      "step": 1
    },
    {
      "epoch": 0.00043663355528872393,
      "grad_norm": 0.24361670017242432,
      "learning_rate": 4e-05,
      "loss": 0.8638,
      "step": 2
    },
    {
      "epoch": 0.0006549503329330859,
      "grad_norm": 0.2159741222858429,
      "learning_rate": 6e-05,
      "loss": 1.0183,
      "step": 3
    },
    {
      "epoch": 0.0008732671105774479,
      "grad_norm": 0.28429126739501953,
      "learning_rate": 8e-05,
      "loss": 1.0131,
      "step": 4
    },
    {
      "epoch": 0.0010915838882218098,
      "grad_norm": 0.2774108350276947,
      "learning_rate": 0.0001,
      "loss": 0.9979,
      "step": 5
    },
    {
      "epoch": 0.0013099006658661718,
      "grad_norm": 0.34028008580207825,
      "learning_rate": 0.00012,
      "loss": 0.9189,
      "step": 6
    },
    {
      "epoch": 0.0015282174435105338,
      "grad_norm": 0.48363491892814636,
      "learning_rate": 0.00014,
      "loss": 0.8468,
      "step": 7
    },
    {
      "epoch": 0.0017465342211548957,
      "grad_norm": 0.4408450424671173,
      "learning_rate": 0.00016,
      "loss": 0.8258,
      "step": 8
    },
    {
      "epoch": 0.001964850998799258,
      "grad_norm": 0.4010414183139801,
      "learning_rate": 0.00018,
      "loss": 0.7643,
      "step": 9
    },
    {
      "epoch": 0.0021831677764436196,
      "grad_norm": 0.23370158672332764,
      "learning_rate": 0.0002,
      "loss": 0.8982,
      "step": 10
    },
    {
      "epoch": 0.002401484554087982,
      "grad_norm": 0.2269158512353897,
      "learning_rate": 0.0001999863304992469,
      "loss": 0.7584,
      "step": 11
    },
    {
      "epoch": 0.0026198013317323436,
      "grad_norm": 0.19322407245635986,
      "learning_rate": 0.00019994532573409262,
      "loss": 0.6418,
      "step": 12
    },
    {
      "epoch": 0.0028381181093767058,
      "grad_norm": 0.22146472334861755,
      "learning_rate": 0.00019987699691483048,
      "loss": 0.5475,
      "step": 13
    },
    {
      "epoch": 0.0030564348870210675,
      "grad_norm": 0.23509173095226288,
      "learning_rate": 0.00019978136272187747,
      "loss": 0.7627,
      "step": 14
    },
    {
      "epoch": 0.0032747516646654297,
      "grad_norm": 0.2376999855041504,
      "learning_rate": 0.000199658449300667,
      "loss": 0.5551,
      "step": 15
    },
    {
      "epoch": 0.0034930684423097914,
      "grad_norm": 0.24817153811454773,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.7661,
      "step": 16
    },
    {
      "epoch": 0.0037113852199541536,
      "grad_norm": 0.19188500940799713,
      "learning_rate": 0.00019933092663536382,
      "loss": 0.6419,
      "step": 17
    },
    {
      "epoch": 0.003929701997598516,
      "grad_norm": 0.2121303379535675,
      "learning_rate": 0.00019912640693269752,
      "loss": 0.7369,
      "step": 18
    },
    {
      "epoch": 0.0041480187752428775,
      "grad_norm": 0.21718432009220123,
      "learning_rate": 0.00019889478706014687,
      "loss": 0.6756,
      "step": 19
    },
    {
      "epoch": 0.004366335552887239,
      "grad_norm": 0.1782589852809906,
      "learning_rate": 0.00019863613034027224,
      "loss": 0.6777,
      "step": 20
    },
    {
      "epoch": 0.004584652330531601,
      "grad_norm": 0.16271273791790009,
      "learning_rate": 0.00019835050748723824,
      "loss": 0.7569,
      "step": 21
    },
    {
      "epoch": 0.004802969108175964,
      "grad_norm": 0.14470206201076508,
      "learning_rate": 0.00019803799658748094,
      "loss": 0.4718,
      "step": 22
    },
    {
      "epoch": 0.005021285885820325,
      "grad_norm": 0.1509976089000702,
      "learning_rate": 0.00019769868307835994,
      "loss": 0.7292,
      "step": 23
    },
    {
      "epoch": 0.005239602663464687,
      "grad_norm": 0.15710937976837158,
      "learning_rate": 0.0001973326597248006,
      "loss": 0.5817,
      "step": 24
    },
    {
      "epoch": 0.005457919441109049,
      "grad_norm": 0.1630478948354721,
      "learning_rate": 0.00019694002659393305,
      "loss": 0.728,
      "step": 25
    },
    {
      "epoch": 0.0056762362187534115,
      "grad_norm": 0.12629622220993042,
      "learning_rate": 0.00019652089102773488,
      "loss": 0.4336,
      "step": 26
    },
    {
      "epoch": 0.005894552996397773,
      "grad_norm": 0.14236295223236084,
      "learning_rate": 0.00019607536761368484,
      "loss": 0.5446,
      "step": 27
    },
    {
      "epoch": 0.006112869774042135,
      "grad_norm": 0.1700434535741806,
      "learning_rate": 0.00019560357815343577,
      "loss": 0.7135,
      "step": 28
    },
    {
      "epoch": 0.006331186551686497,
      "grad_norm": 0.14569510519504547,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.6261,
      "step": 29
    },
    {
      "epoch": 0.006549503329330859,
      "grad_norm": 0.16239100694656372,
      "learning_rate": 0.00019458172417006347,
      "loss": 0.6628,
      "step": 30
    },
    {
      "epoch": 0.006767820106975221,
      "grad_norm": 0.14534121751785278,
      "learning_rate": 0.00019403193901161613,
      "loss": 0.5007,
      "step": 31
    },
    {
      "epoch": 0.006986136884619583,
      "grad_norm": 0.16187363862991333,
      "learning_rate": 0.0001934564464599461,
      "loss": 0.6864,
      "step": 32
    },
    {
      "epoch": 0.007204453662263945,
      "grad_norm": 0.13931529223918915,
      "learning_rate": 0.00019285540384897073,
      "loss": 0.6503,
      "step": 33
    },
    {
      "epoch": 0.007422770439908307,
      "grad_norm": 0.14639389514923096,
      "learning_rate": 0.00019222897549773848,
      "loss": 0.7199,
      "step": 34
    },
    {
      "epoch": 0.007641087217552669,
      "grad_norm": 0.15582042932510376,
      "learning_rate": 0.00019157733266550575,
      "loss": 0.6016,
      "step": 35
    },
    {
      "epoch": 0.007859403995197032,
      "grad_norm": 0.15808553993701935,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.6748,
      "step": 36
    },
    {
      "epoch": 0.008077720772841393,
      "grad_norm": 0.14080394804477692,
      "learning_rate": 0.00019019912301329592,
      "loss": 0.5573,
      "step": 37
    },
    {
      "epoch": 0.008296037550485755,
      "grad_norm": 0.16601316630840302,
      "learning_rate": 0.00018947293298207635,
      "loss": 0.7531,
      "step": 38
    },
    {
      "epoch": 0.008514354328130117,
      "grad_norm": 0.15201738476753235,
      "learning_rate": 0.0001887222819443612,
      "loss": 0.6083,
      "step": 39
    },
    {
      "epoch": 0.008732671105774479,
      "grad_norm": 0.16929268836975098,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.7018,
      "step": 40
    },
    {
      "epoch": 0.00895098788341884,
      "grad_norm": 0.1630568504333496,
      "learning_rate": 0.00018714842436272773,
      "loss": 0.6234,
      "step": 41
    },
    {
      "epoch": 0.009169304661063202,
      "grad_norm": 0.1684565395116806,
      "learning_rate": 0.00018632564809575742,
      "loss": 0.7069,
      "step": 42
    },
    {
      "epoch": 0.009387621438707564,
      "grad_norm": 0.2026190608739853,
      "learning_rate": 0.0001854792712585539,
      "loss": 0.6886,
      "step": 43
    },
    {
      "epoch": 0.009605938216351927,
      "grad_norm": 0.17672273516654968,
      "learning_rate": 0.00018460952524209355,
      "loss": 0.8231,
      "step": 44
    },
    {
      "epoch": 0.009824254993996289,
      "grad_norm": 0.16011355817317963,
      "learning_rate": 0.00018371664782625287,
      "loss": 0.5992,
      "step": 45
    },
    {
      "epoch": 0.01004257177164065,
      "grad_norm": 0.1554107815027237,
      "learning_rate": 0.00018280088311480201,
      "loss": 0.5707,
      "step": 46
    },
    {
      "epoch": 0.010260888549285013,
      "grad_norm": 0.17009565234184265,
      "learning_rate": 0.00018186248146866927,
      "loss": 0.5901,
      "step": 47
    },
    {
      "epoch": 0.010479205326929374,
      "grad_norm": 0.15228688716888428,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.6009,
      "step": 48
    },
    {
      "epoch": 0.010697522104573736,
      "grad_norm": 0.18469072878360748,
      "learning_rate": 0.0001799187996894925,
      "loss": 0.685,
      "step": 49
    },
    {
      "epoch": 0.010915838882218098,
      "grad_norm": 0.1757270246744156,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.7538,
      "step": 50
    },
    {
      "epoch": 0.010915838882218098,
      "eval_loss": 0.6739084720611572,
      "eval_runtime": 181.9504,
      "eval_samples_per_second": 10.602,
      "eval_steps_per_second": 5.304,
      "step": 50
    },
    {
      "epoch": 0.011134155659862461,
      "grad_norm": 0.16260926425457,
      "learning_rate": 0.00017788772787621126,
      "loss": 0.6544,
      "step": 51
    },
    {
      "epoch": 0.011352472437506823,
      "grad_norm": 0.16436129808425903,
      "learning_rate": 0.00017684011108568592,
      "loss": 0.5796,
      "step": 52
    },
    {
      "epoch": 0.011570789215151185,
      "grad_norm": 0.15874071419239044,
      "learning_rate": 0.0001757714869760335,
      "loss": 0.6229,
      "step": 53
    },
    {
      "epoch": 0.011789105992795547,
      "grad_norm": 0.17372044920921326,
      "learning_rate": 0.0001746821476984154,
      "loss": 0.8861,
      "step": 54
    },
    {
      "epoch": 0.012007422770439908,
      "grad_norm": 0.190564826130867,
      "learning_rate": 0.00017357239106731317,
      "loss": 0.779,
      "step": 55
    },
    {
      "epoch": 0.01222573954808427,
      "grad_norm": 0.17491184175014496,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.528,
      "step": 56
    },
    {
      "epoch": 0.012444056325728632,
      "grad_norm": 0.1566992700099945,
      "learning_rate": 0.00017129284482913972,
      "loss": 0.5963,
      "step": 57
    },
    {
      "epoch": 0.012662373103372994,
      "grad_norm": 0.15744012594223022,
      "learning_rate": 0.00017012367842724887,
      "loss": 0.5224,
      "step": 58
    },
    {
      "epoch": 0.012880689881017357,
      "grad_norm": 0.1761963814496994,
      "learning_rate": 0.0001689353409118566,
      "loss": 0.8546,
      "step": 59
    },
    {
      "epoch": 0.013099006658661719,
      "grad_norm": 0.16946905851364136,
      "learning_rate": 0.00016772815716257412,
      "loss": 0.7221,
      "step": 60
    },
    {
      "epoch": 0.01331732343630608,
      "grad_norm": 0.17242185771465302,
      "learning_rate": 0.0001665024572113848,
      "loss": 0.7315,
      "step": 61
    },
    {
      "epoch": 0.013535640213950442,
      "grad_norm": 0.14320792257785797,
      "learning_rate": 0.00016525857615241687,
      "loss": 0.5442,
      "step": 62
    },
    {
      "epoch": 0.013753956991594804,
      "grad_norm": 0.14897648990154266,
      "learning_rate": 0.00016399685405033167,
      "loss": 0.6125,
      "step": 63
    },
    {
      "epoch": 0.013972273769239166,
      "grad_norm": 0.16310685873031616,
      "learning_rate": 0.0001627176358473537,
      "loss": 0.6272,
      "step": 64
    },
    {
      "epoch": 0.014190590546883528,
      "grad_norm": 0.15317314863204956,
      "learning_rate": 0.0001614212712689668,
      "loss": 0.5583,
      "step": 65
    },
    {
      "epoch": 0.01440890732452789,
      "grad_norm": 0.15903247892856598,
      "learning_rate": 0.00016010811472830252,
      "loss": 0.5744,
      "step": 66
    },
    {
      "epoch": 0.014627224102172253,
      "grad_norm": 0.14471517503261566,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.4967,
      "step": 67
    },
    {
      "epoch": 0.014845540879816614,
      "grad_norm": 0.1645640879869461,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.8364,
      "step": 68
    },
    {
      "epoch": 0.015063857657460976,
      "grad_norm": 0.2042667418718338,
      "learning_rate": 0.0001560715057351673,
      "loss": 0.5209,
      "step": 69
    },
    {
      "epoch": 0.015282174435105338,
      "grad_norm": 0.18461734056472778,
      "learning_rate": 0.00015469481581224272,
      "loss": 0.8209,
      "step": 70
    },
    {
      "epoch": 0.0155004912127497,
      "grad_norm": 0.17951343953609467,
      "learning_rate": 0.0001533031728727994,
      "loss": 0.7302,
      "step": 71
    },
    {
      "epoch": 0.015718807990394063,
      "grad_norm": 0.17561626434326172,
      "learning_rate": 0.00015189695737812152,
      "loss": 0.7459,
      "step": 72
    },
    {
      "epoch": 0.015937124768038425,
      "grad_norm": 0.17738279700279236,
      "learning_rate": 0.0001504765537734844,
      "loss": 0.7624,
      "step": 73
    },
    {
      "epoch": 0.016155441545682787,
      "grad_norm": 0.14604610204696655,
      "learning_rate": 0.00014904235038305083,
      "loss": 0.5576,
      "step": 74
    },
    {
      "epoch": 0.01637375832332715,
      "grad_norm": 0.162835031747818,
      "learning_rate": 0.00014759473930370736,
      "loss": 0.6048,
      "step": 75
    },
    {
      "epoch": 0.01659207510097151,
      "grad_norm": 0.16418945789337158,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.5769,
      "step": 76
    },
    {
      "epoch": 0.016810391878615872,
      "grad_norm": 0.19126634299755096,
      "learning_rate": 0.00014466088068528068,
      "loss": 0.7973,
      "step": 77
    },
    {
      "epoch": 0.017028708656260234,
      "grad_norm": 0.1636025458574295,
      "learning_rate": 0.00014317543523384928,
      "loss": 0.6992,
      "step": 78
    },
    {
      "epoch": 0.017247025433904595,
      "grad_norm": 0.1639864444732666,
      "learning_rate": 0.00014167818604952906,
      "loss": 0.6315,
      "step": 79
    },
    {
      "epoch": 0.017465342211548957,
      "grad_norm": 0.20205020904541016,
      "learning_rate": 0.00014016954246529696,
      "loss": 1.0668,
      "step": 80
    },
    {
      "epoch": 0.01768365898919332,
      "grad_norm": 0.16580188274383545,
      "learning_rate": 0.00013864991692924523,
      "loss": 0.7294,
      "step": 81
    },
    {
      "epoch": 0.01790197576683768,
      "grad_norm": 0.17587777972221375,
      "learning_rate": 0.00013711972489182208,
      "loss": 0.6971,
      "step": 82
    },
    {
      "epoch": 0.018120292544482042,
      "grad_norm": 0.18422681093215942,
      "learning_rate": 0.00013557938469225167,
      "loss": 0.7526,
      "step": 83
    },
    {
      "epoch": 0.018338609322126404,
      "grad_norm": 0.16445504128932953,
      "learning_rate": 0.00013402931744416433,
      "loss": 0.6736,
      "step": 84
    },
    {
      "epoch": 0.018556926099770766,
      "grad_norm": 0.14754797518253326,
      "learning_rate": 0.00013246994692046836,
      "loss": 0.4737,
      "step": 85
    },
    {
      "epoch": 0.018775242877415128,
      "grad_norm": 0.19895146787166595,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.7198,
      "step": 86
    },
    {
      "epoch": 0.018993559655059493,
      "grad_norm": 0.16792307794094086,
      "learning_rate": 0.0001293250037384465,
      "loss": 0.7405,
      "step": 87
    },
    {
      "epoch": 0.019211876432703855,
      "grad_norm": 0.1736898571252823,
      "learning_rate": 0.00012774029087618446,
      "loss": 0.6991,
      "step": 88
    },
    {
      "epoch": 0.019430193210348216,
      "grad_norm": 0.19027847051620483,
      "learning_rate": 0.00012614799409538198,
      "loss": 0.8642,
      "step": 89
    },
    {
      "epoch": 0.019648509987992578,
      "grad_norm": 0.16890498995780945,
      "learning_rate": 0.00012454854871407994,
      "loss": 0.5199,
      "step": 90
    },
    {
      "epoch": 0.01986682676563694,
      "grad_norm": 0.17198000848293304,
      "learning_rate": 0.00012294239200467516,
      "loss": 0.6932,
      "step": 91
    },
    {
      "epoch": 0.0200851435432813,
      "grad_norm": 0.17940305173397064,
      "learning_rate": 0.0001213299630743747,
      "loss": 0.5811,
      "step": 92
    },
    {
      "epoch": 0.020303460320925663,
      "grad_norm": 0.17034873366355896,
      "learning_rate": 0.00011971170274514802,
      "loss": 0.6274,
      "step": 93
    },
    {
      "epoch": 0.020521777098570025,
      "grad_norm": 0.17157679796218872,
      "learning_rate": 0.000118088053433211,
      "loss": 0.6947,
      "step": 94
    },
    {
      "epoch": 0.020740093876214387,
      "grad_norm": 0.18038637936115265,
      "learning_rate": 0.00011645945902807341,
      "loss": 0.6019,
      "step": 95
    },
    {
      "epoch": 0.02095841065385875,
      "grad_norm": 0.16414083540439606,
      "learning_rate": 0.0001148263647711842,
      "loss": 0.5772,
      "step": 96
    },
    {
      "epoch": 0.02117672743150311,
      "grad_norm": 0.17010308802127838,
      "learning_rate": 0.00011318921713420691,
      "loss": 0.6766,
      "step": 97
    },
    {
      "epoch": 0.021395044209147472,
      "grad_norm": 0.17190682888031006,
      "learning_rate": 0.00011154846369695863,
      "loss": 0.6057,
      "step": 98
    },
    {
      "epoch": 0.021613360986791834,
      "grad_norm": 0.18158312141895294,
      "learning_rate": 0.0001099045530250463,
      "loss": 0.7767,
      "step": 99
    },
    {
      "epoch": 0.021831677764436196,
      "grad_norm": 0.18446941673755646,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.7482,
      "step": 100
    },
    {
      "epoch": 0.021831677764436196,
      "eval_loss": 0.6601452231407166,
      "eval_runtime": 181.8265,
      "eval_samples_per_second": 10.609,
      "eval_steps_per_second": 5.307,
      "step": 100
    },
    {
      "epoch": 0.022049994542080557,
      "grad_norm": 0.21762917935848236,
      "learning_rate": 0.00010660905843256994,
      "loss": 0.7735,
      "step": 101
    },
    {
      "epoch": 0.022268311319724923,
      "grad_norm": 0.18513105809688568,
      "learning_rate": 0.00010495837546732224,
      "loss": 0.7981,
      "step": 102
    },
    {
      "epoch": 0.022486628097369284,
      "grad_norm": 0.1725614219903946,
      "learning_rate": 0.00010330633693173082,
      "loss": 0.66,
      "step": 103
    },
    {
      "epoch": 0.022704944875013646,
      "grad_norm": 0.18261019885540009,
      "learning_rate": 0.00010165339447663587,
      "loss": 0.8389,
      "step": 104
    },
    {
      "epoch": 0.022923261652658008,
      "grad_norm": 0.16905616223812103,
      "learning_rate": 0.0001,
      "loss": 0.5165,
      "step": 105
    },
    {
      "epoch": 0.02314157843030237,
      "grad_norm": 0.18981021642684937,
      "learning_rate": 9.834660552336415e-05,
      "loss": 0.8955,
      "step": 106
    },
    {
      "epoch": 0.02335989520794673,
      "grad_norm": 0.18465036153793335,
      "learning_rate": 9.669366306826919e-05,
      "loss": 0.6604,
      "step": 107
    },
    {
      "epoch": 0.023578211985591093,
      "grad_norm": 0.1783265918493271,
      "learning_rate": 9.504162453267777e-05,
      "loss": 0.6334,
      "step": 108
    },
    {
      "epoch": 0.023796528763235455,
      "grad_norm": 0.16945728659629822,
      "learning_rate": 9.339094156743007e-05,
      "loss": 0.5026,
      "step": 109
    },
    {
      "epoch": 0.024014845540879817,
      "grad_norm": 0.16877439618110657,
      "learning_rate": 9.174206545276677e-05,
      "loss": 0.6765,
      "step": 110
    },
    {
      "epoch": 0.02423316231852418,
      "grad_norm": 0.18142911791801453,
      "learning_rate": 9.009544697495374e-05,
      "loss": 0.6832,
      "step": 111
    },
    {
      "epoch": 0.02445147909616854,
      "grad_norm": 0.17140087485313416,
      "learning_rate": 8.845153630304139e-05,
      "loss": 0.6352,
      "step": 112
    },
    {
      "epoch": 0.024669795873812902,
      "grad_norm": 0.16582410037517548,
      "learning_rate": 8.681078286579311e-05,
      "loss": 0.5553,
      "step": 113
    },
    {
      "epoch": 0.024888112651457264,
      "grad_norm": 0.15053234994411469,
      "learning_rate": 8.517363522881579e-05,
      "loss": 0.5002,
      "step": 114
    },
    {
      "epoch": 0.025106429429101625,
      "grad_norm": 0.17048825323581696,
      "learning_rate": 8.35405409719266e-05,
      "loss": 0.6109,
      "step": 115
    },
    {
      "epoch": 0.025324746206745987,
      "grad_norm": 0.16614414751529694,
      "learning_rate": 8.191194656678904e-05,
      "loss": 0.6749,
      "step": 116
    },
    {
      "epoch": 0.02554306298439035,
      "grad_norm": 0.17033010721206665,
      "learning_rate": 8.028829725485199e-05,
      "loss": 0.5342,
      "step": 117
    },
    {
      "epoch": 0.025761379762034714,
      "grad_norm": 0.18463349342346191,
      "learning_rate": 7.867003692562534e-05,
      "loss": 0.7151,
      "step": 118
    },
    {
      "epoch": 0.025979696539679076,
      "grad_norm": 0.15648098289966583,
      "learning_rate": 7.705760799532485e-05,
      "loss": 0.5211,
      "step": 119
    },
    {
      "epoch": 0.026198013317323438,
      "grad_norm": 0.16885478794574738,
      "learning_rate": 7.54514512859201e-05,
      "loss": 0.6123,
      "step": 120
    },
    {
      "epoch": 0.0264163300949678,
      "grad_norm": 0.16653332114219666,
      "learning_rate": 7.385200590461803e-05,
      "loss": 0.6589,
      "step": 121
    },
    {
      "epoch": 0.02663464687261216,
      "grad_norm": 0.1621529459953308,
      "learning_rate": 7.225970912381556e-05,
      "loss": 0.5623,
      "step": 122
    },
    {
      "epoch": 0.026852963650256523,
      "grad_norm": 0.17834222316741943,
      "learning_rate": 7.067499626155354e-05,
      "loss": 0.5837,
      "step": 123
    },
    {
      "epoch": 0.027071280427900885,
      "grad_norm": 0.17469386756420135,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.7037,
      "step": 124
    },
    {
      "epoch": 0.027289597205545246,
      "grad_norm": 0.16732703149318695,
      "learning_rate": 6.753005307953167e-05,
      "loss": 0.6519,
      "step": 125
    },
    {
      "epoch": 0.027507913983189608,
      "grad_norm": 0.19885458052158356,
      "learning_rate": 6.59706825558357e-05,
      "loss": 0.637,
      "step": 126
    },
    {
      "epoch": 0.02772623076083397,
      "grad_norm": 0.15952806174755096,
      "learning_rate": 6.442061530774834e-05,
      "loss": 0.5294,
      "step": 127
    },
    {
      "epoch": 0.02794454753847833,
      "grad_norm": 0.18024499714374542,
      "learning_rate": 6.28802751081779e-05,
      "loss": 0.7163,
      "step": 128
    },
    {
      "epoch": 0.028162864316122693,
      "grad_norm": 0.1813889443874359,
      "learning_rate": 6.135008307075481e-05,
      "loss": 0.8399,
      "step": 129
    },
    {
      "epoch": 0.028381181093767055,
      "grad_norm": 0.16543258726596832,
      "learning_rate": 5.983045753470308e-05,
      "loss": 0.6442,
      "step": 130
    },
    {
      "epoch": 0.028599497871411417,
      "grad_norm": 0.21711844205856323,
      "learning_rate": 5.832181395047098e-05,
      "loss": 0.9936,
      "step": 131
    },
    {
      "epoch": 0.02881781464905578,
      "grad_norm": 0.16442720592021942,
      "learning_rate": 5.6824564766150726e-05,
      "loss": 0.5848,
      "step": 132
    },
    {
      "epoch": 0.02903613142670014,
      "grad_norm": 0.16723956167697906,
      "learning_rate": 5.533911931471936e-05,
      "loss": 0.6755,
      "step": 133
    },
    {
      "epoch": 0.029254448204344505,
      "grad_norm": 0.16711710393428802,
      "learning_rate": 5.386588370213124e-05,
      "loss": 0.6657,
      "step": 134
    },
    {
      "epoch": 0.029472764981988867,
      "grad_norm": 0.16483482718467712,
      "learning_rate": 5.240526069629265e-05,
      "loss": 0.6668,
      "step": 135
    },
    {
      "epoch": 0.02969108175963323,
      "grad_norm": 0.17765454947948456,
      "learning_rate": 5.095764961694922e-05,
      "loss": 0.6791,
      "step": 136
    },
    {
      "epoch": 0.02990939853727759,
      "grad_norm": 0.18251171708106995,
      "learning_rate": 4.952344622651566e-05,
      "loss": 0.7367,
      "step": 137
    },
    {
      "epoch": 0.030127715314921952,
      "grad_norm": 0.17135152220726013,
      "learning_rate": 4.810304262187852e-05,
      "loss": 0.6704,
      "step": 138
    },
    {
      "epoch": 0.030346032092566314,
      "grad_norm": 0.194648876786232,
      "learning_rate": 4.669682712720065e-05,
      "loss": 0.9007,
      "step": 139
    },
    {
      "epoch": 0.030564348870210676,
      "grad_norm": 0.14134380221366882,
      "learning_rate": 4.530518418775733e-05,
      "loss": 0.4324,
      "step": 140
    },
    {
      "epoch": 0.030782665647855038,
      "grad_norm": 0.16082902252674103,
      "learning_rate": 4.392849426483274e-05,
      "loss": 0.5531,
      "step": 141
    },
    {
      "epoch": 0.0310009824254994,
      "grad_norm": 0.185900017619133,
      "learning_rate": 4.256713373170564e-05,
      "loss": 0.8044,
      "step": 142
    },
    {
      "epoch": 0.03121929920314376,
      "grad_norm": 0.16822083294391632,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.63,
      "step": 143
    },
    {
      "epoch": 0.031437615980788126,
      "grad_norm": 0.1961585134267807,
      "learning_rate": 3.9891885271697496e-05,
      "loss": 0.7769,
      "step": 144
    },
    {
      "epoch": 0.03165593275843249,
      "grad_norm": 0.17126162350177765,
      "learning_rate": 3.857872873103322e-05,
      "loss": 0.5417,
      "step": 145
    },
    {
      "epoch": 0.03187424953607685,
      "grad_norm": 0.17973680794239044,
      "learning_rate": 3.7282364152646297e-05,
      "loss": 0.6837,
      "step": 146
    },
    {
      "epoch": 0.03209256631372121,
      "grad_norm": 0.18085865676403046,
      "learning_rate": 3.600314594966834e-05,
      "loss": 0.7359,
      "step": 147
    },
    {
      "epoch": 0.03231088309136557,
      "grad_norm": 0.1475389152765274,
      "learning_rate": 3.4741423847583134e-05,
      "loss": 0.4654,
      "step": 148
    },
    {
      "epoch": 0.032529199869009935,
      "grad_norm": 0.1810745894908905,
      "learning_rate": 3.349754278861517e-05,
      "loss": 0.7939,
      "step": 149
    },
    {
      "epoch": 0.0327475166466543,
      "grad_norm": 0.18114720284938812,
      "learning_rate": 3.227184283742591e-05,
      "loss": 0.6863,
      "step": 150
    },
    {
      "epoch": 0.0327475166466543,
      "eval_loss": 0.6545336842536926,
      "eval_runtime": 181.8183,
      "eval_samples_per_second": 10.609,
      "eval_steps_per_second": 5.307,
      "step": 150
    },
    {
      "epoch": 0.03296583342429866,
      "grad_norm": 0.17468896508216858,
      "learning_rate": 3.106465908814342e-05,
      "loss": 0.6614,
      "step": 151
    },
    {
      "epoch": 0.03318415020194302,
      "grad_norm": 0.15946292877197266,
      "learning_rate": 2.9876321572751144e-05,
      "loss": 0.5086,
      "step": 152
    },
    {
      "epoch": 0.03340246697958738,
      "grad_norm": 0.17035330832004547,
      "learning_rate": 2.87071551708603e-05,
      "loss": 0.6278,
      "step": 153
    },
    {
      "epoch": 0.033620783757231744,
      "grad_norm": 0.18423694372177124,
      "learning_rate": 2.7557479520891104e-05,
      "loss": 0.6574,
      "step": 154
    },
    {
      "epoch": 0.033839100534876106,
      "grad_norm": 0.17745865881443024,
      "learning_rate": 2.6427608932686843e-05,
      "loss": 0.7644,
      "step": 155
    },
    {
      "epoch": 0.03405741731252047,
      "grad_norm": 0.23100459575653076,
      "learning_rate": 2.5317852301584643e-05,
      "loss": 0.8962,
      "step": 156
    },
    {
      "epoch": 0.03427573409016483,
      "grad_norm": 0.2258739024400711,
      "learning_rate": 2.422851302396655e-05,
      "loss": 0.7379,
      "step": 157
    },
    {
      "epoch": 0.03449405086780919,
      "grad_norm": 0.189512699842453,
      "learning_rate": 2.315988891431412e-05,
      "loss": 0.8372,
      "step": 158
    },
    {
      "epoch": 0.03471236764545355,
      "grad_norm": 0.18781927227973938,
      "learning_rate": 2.2112272123788768e-05,
      "loss": 0.6743,
      "step": 159
    },
    {
      "epoch": 0.034930684423097914,
      "grad_norm": 0.15858572721481323,
      "learning_rate": 2.1085949060360654e-05,
      "loss": 0.4646,
      "step": 160
    },
    {
      "epoch": 0.035149001200742276,
      "grad_norm": 0.18067395687103271,
      "learning_rate": 2.008120031050753e-05,
      "loss": 0.6516,
      "step": 161
    },
    {
      "epoch": 0.03536731797838664,
      "grad_norm": 0.1741492599248886,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.6783,
      "step": 162
    },
    {
      "epoch": 0.035585634756031,
      "grad_norm": 0.17116688191890717,
      "learning_rate": 1.8137518531330767e-05,
      "loss": 0.582,
      "step": 163
    },
    {
      "epoch": 0.03580395153367536,
      "grad_norm": 0.19832322001457214,
      "learning_rate": 1.7199116885197995e-05,
      "loss": 0.838,
      "step": 164
    },
    {
      "epoch": 0.03602226831131972,
      "grad_norm": 0.1676938533782959,
      "learning_rate": 1.6283352173747145e-05,
      "loss": 0.5495,
      "step": 165
    },
    {
      "epoch": 0.036240585088964085,
      "grad_norm": 0.16515184938907623,
      "learning_rate": 1.5390474757906446e-05,
      "loss": 0.5651,
      "step": 166
    },
    {
      "epoch": 0.03645890186660845,
      "grad_norm": 0.20315131545066833,
      "learning_rate": 1.4520728741446089e-05,
      "loss": 0.77,
      "step": 167
    },
    {
      "epoch": 0.03667721864425281,
      "grad_norm": 0.18013328313827515,
      "learning_rate": 1.3674351904242611e-05,
      "loss": 0.7325,
      "step": 168
    },
    {
      "epoch": 0.03689553542189717,
      "grad_norm": 0.18683043122291565,
      "learning_rate": 1.2851575637272262e-05,
      "loss": 0.6805,
      "step": 169
    },
    {
      "epoch": 0.03711385219954153,
      "grad_norm": 0.18305127322673798,
      "learning_rate": 1.2052624879351104e-05,
      "loss": 0.7239,
      "step": 170
    },
    {
      "epoch": 0.037332168977185894,
      "grad_norm": 0.19132886826992035,
      "learning_rate": 1.1277718055638819e-05,
      "loss": 0.746,
      "step": 171
    },
    {
      "epoch": 0.037550485754830255,
      "grad_norm": 0.18872715532779694,
      "learning_rate": 1.0527067017923654e-05,
      "loss": 0.8513,
      "step": 172
    },
    {
      "epoch": 0.037768802532474624,
      "grad_norm": 0.18976855278015137,
      "learning_rate": 9.80087698670411e-06,
      "loss": 0.8007,
      "step": 173
    },
    {
      "epoch": 0.037987119310118986,
      "grad_norm": 0.1643442064523697,
      "learning_rate": 9.09934649508375e-06,
      "loss": 0.4781,
      "step": 174
    },
    {
      "epoch": 0.03820543608776335,
      "grad_norm": 0.17088055610656738,
      "learning_rate": 8.422667334494249e-06,
      "loss": 0.5486,
      "step": 175
    },
    {
      "epoch": 0.03842375286540771,
      "grad_norm": 0.16870273649692535,
      "learning_rate": 7.771024502261526e-06,
      "loss": 0.5814,
      "step": 176
    },
    {
      "epoch": 0.03864206964305207,
      "grad_norm": 0.19257135689258575,
      "learning_rate": 7.144596151029303e-06,
      "loss": 0.615,
      "step": 177
    },
    {
      "epoch": 0.03886038642069643,
      "grad_norm": 0.16927644610404968,
      "learning_rate": 6.543553540053926e-06,
      "loss": 0.6345,
      "step": 178
    },
    {
      "epoch": 0.039078703198340795,
      "grad_norm": 0.16874222457408905,
      "learning_rate": 5.968060988383883e-06,
      "loss": 0.4597,
      "step": 179
    },
    {
      "epoch": 0.039297019975985156,
      "grad_norm": 0.16570569574832916,
      "learning_rate": 5.418275829936537e-06,
      "loss": 0.6071,
      "step": 180
    },
    {
      "epoch": 0.03951533675362952,
      "grad_norm": 0.17248547077178955,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.6086,
      "step": 181
    },
    {
      "epoch": 0.03973365353127388,
      "grad_norm": 0.17762534320354462,
      "learning_rate": 4.3964218465642355e-06,
      "loss": 0.6477,
      "step": 182
    },
    {
      "epoch": 0.03995197030891824,
      "grad_norm": 0.2004290670156479,
      "learning_rate": 3.924632386315186e-06,
      "loss": 0.7786,
      "step": 183
    },
    {
      "epoch": 0.0401702870865626,
      "grad_norm": 0.17008547484874725,
      "learning_rate": 3.4791089722651436e-06,
      "loss": 0.6086,
      "step": 184
    },
    {
      "epoch": 0.040388603864206965,
      "grad_norm": 0.17387335002422333,
      "learning_rate": 3.059973406066963e-06,
      "loss": 0.6396,
      "step": 185
    },
    {
      "epoch": 0.04060692064185133,
      "grad_norm": 0.16781817376613617,
      "learning_rate": 2.667340275199426e-06,
      "loss": 0.4575,
      "step": 186
    },
    {
      "epoch": 0.04082523741949569,
      "grad_norm": 0.17695757746696472,
      "learning_rate": 2.3013169216400733e-06,
      "loss": 0.7397,
      "step": 187
    },
    {
      "epoch": 0.04104355419714005,
      "grad_norm": 0.1608322560787201,
      "learning_rate": 1.9620034125190644e-06,
      "loss": 0.4613,
      "step": 188
    },
    {
      "epoch": 0.04126187097478441,
      "grad_norm": 0.17157816886901855,
      "learning_rate": 1.6494925127617634e-06,
      "loss": 0.6781,
      "step": 189
    },
    {
      "epoch": 0.041480187752428774,
      "grad_norm": 0.18058647215366364,
      "learning_rate": 1.3638696597277679e-06,
      "loss": 0.7726,
      "step": 190
    },
    {
      "epoch": 0.041698504530073136,
      "grad_norm": 0.1864567995071411,
      "learning_rate": 1.1052129398531507e-06,
      "loss": 0.7157,
      "step": 191
    },
    {
      "epoch": 0.0419168213077175,
      "grad_norm": 0.1998099386692047,
      "learning_rate": 8.735930673024806e-07,
      "loss": 0.6688,
      "step": 192
    },
    {
      "epoch": 0.04213513808536186,
      "grad_norm": 0.17582550644874573,
      "learning_rate": 6.690733646361857e-07,
      "loss": 0.6509,
      "step": 193
    },
    {
      "epoch": 0.04235345486300622,
      "grad_norm": 0.19661235809326172,
      "learning_rate": 4.917097454988584e-07,
      "loss": 0.6709,
      "step": 194
    },
    {
      "epoch": 0.04257177164065058,
      "grad_norm": 0.1901334822177887,
      "learning_rate": 3.415506993330153e-07,
      "loss": 0.7045,
      "step": 195
    },
    {
      "epoch": 0.042790088418294944,
      "grad_norm": 0.1601884961128235,
      "learning_rate": 2.1863727812254653e-07,
      "loss": 0.5396,
      "step": 196
    },
    {
      "epoch": 0.043008405195939306,
      "grad_norm": 0.1761172115802765,
      "learning_rate": 1.230030851695263e-07,
      "loss": 0.7687,
      "step": 197
    },
    {
      "epoch": 0.04322672197358367,
      "grad_norm": 0.1702435463666916,
      "learning_rate": 5.467426590739511e-08,
      "loss": 0.6001,
      "step": 198
    },
    {
      "epoch": 0.04344503875122803,
      "grad_norm": 0.16501539945602417,
      "learning_rate": 1.3669500753099585e-08,
      "loss": 0.5596,
      "step": 199
    },
    {
      "epoch": 0.04366335552887239,
      "grad_norm": 0.19477379322052002,
      "learning_rate": 0.0,
      "loss": 0.7117,
      "step": 200
    },
    {
      "epoch": 0.04366335552887239,
      "eval_loss": 0.6532227993011475,
      "eval_runtime": 181.8705,
      "eval_samples_per_second": 10.606,
      "eval_steps_per_second": 5.306,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3142821265473536e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}