{
  "best_metric": 0.6152855753898621,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.7682587819054839,
  "eval_steps": 25,
  "global_step": 95,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008086934546373515,
      "grad_norm": 1.5315700769424438,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.7462,
      "step": 1
    },
    {
      "epoch": 0.008086934546373515,
      "eval_loss": 2.1085262298583984,
      "eval_runtime": 3.426,
      "eval_samples_per_second": 14.594,
      "eval_steps_per_second": 3.795,
      "step": 1
    },
    {
      "epoch": 0.01617386909274703,
      "grad_norm": 1.7186044454574585,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.9119,
      "step": 2
    },
    {
      "epoch": 0.024260803639120546,
      "grad_norm": 1.9918476343154907,
      "learning_rate": 0.0001,
      "loss": 1.9158,
      "step": 3
    },
    {
      "epoch": 0.03234773818549406,
      "grad_norm": 2.2980024814605713,
      "learning_rate": 9.997376600647783e-05,
      "loss": 1.8927,
      "step": 4
    },
    {
      "epoch": 0.04043467273186758,
      "grad_norm": 3.0650627613067627,
      "learning_rate": 9.989509461357426e-05,
      "loss": 1.3474,
      "step": 5
    },
    {
      "epoch": 0.04852160727824109,
      "grad_norm": 1.560479998588562,
      "learning_rate": 9.976407754861426e-05,
      "loss": 1.1936,
      "step": 6
    },
    {
      "epoch": 0.056608541824614604,
      "grad_norm": 2.4149370193481445,
      "learning_rate": 9.958086757163489e-05,
      "loss": 1.0949,
      "step": 7
    },
    {
      "epoch": 0.06469547637098812,
      "grad_norm": 1.5469962358474731,
      "learning_rate": 9.934567829727386e-05,
      "loss": 0.9459,
      "step": 8
    },
    {
      "epoch": 0.07278241091736164,
      "grad_norm": 0.9416520595550537,
      "learning_rate": 9.905878394570453e-05,
      "loss": 0.8887,
      "step": 9
    },
    {
      "epoch": 0.08086934546373516,
      "grad_norm": 0.7777389883995056,
      "learning_rate": 9.872051902290737e-05,
      "loss": 0.8094,
      "step": 10
    },
    {
      "epoch": 0.08895628001010866,
      "grad_norm": 0.6452591419219971,
      "learning_rate": 9.833127793065098e-05,
      "loss": 0.7,
      "step": 11
    },
    {
      "epoch": 0.09704321455648218,
      "grad_norm": 0.6828857660293579,
      "learning_rate": 9.789151450663723e-05,
      "loss": 0.5981,
      "step": 12
    },
    {
      "epoch": 0.1051301491028557,
      "grad_norm": 0.6075629591941833,
      "learning_rate": 9.740174149534693e-05,
      "loss": 0.7365,
      "step": 13
    },
    {
      "epoch": 0.11321708364922921,
      "grad_norm": 0.4803518056869507,
      "learning_rate": 9.686252995020249e-05,
      "loss": 0.6859,
      "step": 14
    },
    {
      "epoch": 0.12130401819560273,
      "grad_norm": 0.5006831288337708,
      "learning_rate": 9.627450856774539e-05,
      "loss": 0.7054,
      "step": 15
    },
    {
      "epoch": 0.12939095274197623,
      "grad_norm": 0.5048025846481323,
      "learning_rate": 9.563836295460398e-05,
      "loss": 0.8301,
      "step": 16
    },
    {
      "epoch": 0.13747788728834975,
      "grad_norm": 0.5192086696624756,
      "learning_rate": 9.495483482810688e-05,
      "loss": 0.8157,
      "step": 17
    },
    {
      "epoch": 0.14556482183472327,
      "grad_norm": 0.5117838978767395,
      "learning_rate": 9.422472115147382e-05,
      "loss": 0.7733,
      "step": 18
    },
    {
      "epoch": 0.1536517563810968,
      "grad_norm": 0.4643174111843109,
      "learning_rate": 9.3448873204592e-05,
      "loss": 0.7385,
      "step": 19
    },
    {
      "epoch": 0.1617386909274703,
      "grad_norm": 0.44568756222724915,
      "learning_rate": 9.2628195591462e-05,
      "loss": 0.7317,
      "step": 20
    },
    {
      "epoch": 0.16982562547384383,
      "grad_norm": 0.4924642741680145,
      "learning_rate": 9.176364518546989e-05,
      "loss": 0.708,
      "step": 21
    },
    {
      "epoch": 0.17791256002021733,
      "grad_norm": 0.48531684279441833,
      "learning_rate": 9.08562300137157e-05,
      "loss": 0.7477,
      "step": 22
    },
    {
      "epoch": 0.18599949456659085,
      "grad_norm": 0.4396596848964691,
      "learning_rate": 8.990700808169889e-05,
      "loss": 0.6783,
      "step": 23
    },
    {
      "epoch": 0.19408642911296436,
      "grad_norm": 0.40898025035858154,
      "learning_rate": 8.891708613973126e-05,
      "loss": 0.6408,
      "step": 24
    },
    {
      "epoch": 0.20217336365933788,
      "grad_norm": 0.48895806074142456,
      "learning_rate": 8.788761839251559e-05,
      "loss": 0.6364,
      "step": 25
    },
    {
      "epoch": 0.20217336365933788,
      "eval_loss": 0.6505181193351746,
      "eval_runtime": 3.4916,
      "eval_samples_per_second": 14.32,
      "eval_steps_per_second": 3.723,
      "step": 25
    },
    {
      "epoch": 0.2102602982057114,
      "grad_norm": 0.5990607142448425,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.7604,
      "step": 26
    },
    {
      "epoch": 0.21834723275208492,
      "grad_norm": 0.5680568218231201,
      "learning_rate": 8.571489144483944e-05,
      "loss": 0.7052,
      "step": 27
    },
    {
      "epoch": 0.22643416729845842,
      "grad_norm": 0.4318358302116394,
      "learning_rate": 8.457416554680877e-05,
      "loss": 0.7392,
      "step": 28
    },
    {
      "epoch": 0.23452110184483194,
      "grad_norm": 0.4534890353679657,
      "learning_rate": 8.339895749467238e-05,
      "loss": 0.7612,
      "step": 29
    },
    {
      "epoch": 0.24260803639120546,
      "grad_norm": 0.41572484374046326,
      "learning_rate": 8.219063752844926e-05,
      "loss": 0.6753,
      "step": 30
    },
    {
      "epoch": 0.250694970937579,
      "grad_norm": 0.391692578792572,
      "learning_rate": 8.095061449516903e-05,
      "loss": 0.7202,
      "step": 31
    },
    {
      "epoch": 0.25878190548395247,
      "grad_norm": 0.4226691722869873,
      "learning_rate": 7.968033420621935e-05,
      "loss": 0.6996,
      "step": 32
    },
    {
      "epoch": 0.266868840030326,
      "grad_norm": 0.43875357508659363,
      "learning_rate": 7.838127775159452e-05,
      "loss": 0.7064,
      "step": 33
    },
    {
      "epoch": 0.2749557745766995,
      "grad_norm": 0.3757180869579315,
      "learning_rate": 7.705495977301078e-05,
      "loss": 0.6694,
      "step": 34
    },
    {
      "epoch": 0.28304270912307306,
      "grad_norm": 0.41616472601890564,
      "learning_rate": 7.570292669790186e-05,
      "loss": 0.6895,
      "step": 35
    },
    {
      "epoch": 0.29112964366944655,
      "grad_norm": 0.4041263461112976,
      "learning_rate": 7.43267549363537e-05,
      "loss": 0.6098,
      "step": 36
    },
    {
      "epoch": 0.29921657821582004,
      "grad_norm": 0.39486125111579895,
      "learning_rate": 7.292804904308087e-05,
      "loss": 0.6039,
      "step": 37
    },
    {
      "epoch": 0.3073035127621936,
      "grad_norm": 0.35433000326156616,
      "learning_rate": 7.150843984658754e-05,
      "loss": 0.5956,
      "step": 38
    },
    {
      "epoch": 0.3153904473085671,
      "grad_norm": 0.3867066204547882,
      "learning_rate": 7.006958254769438e-05,
      "loss": 0.6729,
      "step": 39
    },
    {
      "epoch": 0.3234773818549406,
      "grad_norm": 0.40773889422416687,
      "learning_rate": 6.861315478964841e-05,
      "loss": 0.6722,
      "step": 40
    },
    {
      "epoch": 0.3315643164013141,
      "grad_norm": 0.36118409037590027,
      "learning_rate": 6.714085470206609e-05,
      "loss": 0.6863,
      "step": 41
    },
    {
      "epoch": 0.33965125094768767,
      "grad_norm": 0.42860254645347595,
      "learning_rate": 6.56543989209901e-05,
      "loss": 0.7423,
      "step": 42
    },
    {
      "epoch": 0.34773818549406116,
      "grad_norm": 0.3781806528568268,
      "learning_rate": 6.415552058736854e-05,
      "loss": 0.6734,
      "step": 43
    },
    {
      "epoch": 0.35582512004043465,
      "grad_norm": 0.45583733916282654,
      "learning_rate": 6.264596732629e-05,
      "loss": 0.7366,
      "step": 44
    },
    {
      "epoch": 0.3639120545868082,
      "grad_norm": 0.42888590693473816,
      "learning_rate": 6.112749920933111e-05,
      "loss": 0.6791,
      "step": 45
    },
    {
      "epoch": 0.3719989891331817,
      "grad_norm": 0.5353739261627197,
      "learning_rate": 5.960188670239154e-05,
      "loss": 0.6581,
      "step": 46
    },
    {
      "epoch": 0.38008592367955524,
      "grad_norm": 0.6451358199119568,
      "learning_rate": 5.80709086014102e-05,
      "loss": 0.6297,
      "step": 47
    },
    {
      "epoch": 0.38817285822592873,
      "grad_norm": 0.4006296396255493,
      "learning_rate": 5.653634995836856e-05,
      "loss": 0.614,
      "step": 48
    },
    {
      "epoch": 0.3962597927723022,
      "grad_norm": 0.4140074849128723,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.5914,
      "step": 49
    },
    {
      "epoch": 0.40434672731867577,
      "grad_norm": 0.3834184408187866,
      "learning_rate": 5.346365004163145e-05,
      "loss": 0.5771,
      "step": 50
    },
    {
      "epoch": 0.40434672731867577,
      "eval_loss": 0.6258954405784607,
      "eval_runtime": 3.5135,
      "eval_samples_per_second": 14.231,
      "eval_steps_per_second": 3.7,
      "step": 50
    },
    {
      "epoch": 0.41243366186504926,
      "grad_norm": 0.4838196933269501,
      "learning_rate": 5.192909139858981e-05,
      "loss": 0.7284,
      "step": 51
    },
    {
      "epoch": 0.4205205964114228,
      "grad_norm": 0.49653565883636475,
      "learning_rate": 5.0398113297608465e-05,
      "loss": 0.6909,
      "step": 52
    },
    {
      "epoch": 0.4286075309577963,
      "grad_norm": 0.5994144678115845,
      "learning_rate": 4.887250079066892e-05,
      "loss": 0.7597,
      "step": 53
    },
    {
      "epoch": 0.43669446550416985,
      "grad_norm": 0.5351035594940186,
      "learning_rate": 4.7354032673710005e-05,
      "loss": 0.7537,
      "step": 54
    },
    {
      "epoch": 0.44478140005054334,
      "grad_norm": 0.4333861470222473,
      "learning_rate": 4.584447941263149e-05,
      "loss": 0.6553,
      "step": 55
    },
    {
      "epoch": 0.45286833459691683,
      "grad_norm": 0.39463362097740173,
      "learning_rate": 4.43456010790099e-05,
      "loss": 0.7712,
      "step": 56
    },
    {
      "epoch": 0.4609552691432904,
      "grad_norm": 0.36598527431488037,
      "learning_rate": 4.285914529793391e-05,
      "loss": 0.6333,
      "step": 57
    },
    {
      "epoch": 0.4690422036896639,
      "grad_norm": 0.3563452661037445,
      "learning_rate": 4.13868452103516e-05,
      "loss": 0.6562,
      "step": 58
    },
    {
      "epoch": 0.4771291382360374,
      "grad_norm": 0.451840341091156,
      "learning_rate": 3.9930417452305626e-05,
      "loss": 0.711,
      "step": 59
    },
    {
      "epoch": 0.4852160727824109,
      "grad_norm": 0.37084537744522095,
      "learning_rate": 3.8491560153412466e-05,
      "loss": 0.6075,
      "step": 60
    },
    {
      "epoch": 0.49330300732878446,
      "grad_norm": 0.47421273589134216,
      "learning_rate": 3.707195095691913e-05,
      "loss": 0.7213,
      "step": 61
    },
    {
      "epoch": 0.501389941875158,
      "grad_norm": 0.5006868243217468,
      "learning_rate": 3.567324506364632e-05,
      "loss": 0.5814,
      "step": 62
    },
    {
      "epoch": 0.5094768764215315,
      "grad_norm": 0.38857290148735046,
      "learning_rate": 3.4297073302098156e-05,
      "loss": 0.6031,
      "step": 63
    },
    {
      "epoch": 0.5175638109679049,
      "grad_norm": 0.3656562268733978,
      "learning_rate": 3.2945040226989244e-05,
      "loss": 0.7153,
      "step": 64
    },
    {
      "epoch": 0.5256507455142785,
      "grad_norm": 0.3729749023914337,
      "learning_rate": 3.16187222484055e-05,
      "loss": 0.7616,
      "step": 65
    },
    {
      "epoch": 0.533737680060652,
      "grad_norm": 0.3745996654033661,
      "learning_rate": 3.0319665793780648e-05,
      "loss": 0.7703,
      "step": 66
    },
    {
      "epoch": 0.5418246146070256,
      "grad_norm": 0.3874468505382538,
      "learning_rate": 2.9049385504830985e-05,
      "loss": 0.7703,
      "step": 67
    },
    {
      "epoch": 0.549911549153399,
      "grad_norm": 0.40286222100257874,
      "learning_rate": 2.7809362471550748e-05,
      "loss": 0.7155,
      "step": 68
    },
    {
      "epoch": 0.5579984836997726,
      "grad_norm": 0.3780246078968048,
      "learning_rate": 2.660104250532764e-05,
      "loss": 0.6842,
      "step": 69
    },
    {
      "epoch": 0.5660854182461461,
      "grad_norm": 0.34932300448417664,
      "learning_rate": 2.5425834453191232e-05,
      "loss": 0.6526,
      "step": 70
    },
    {
      "epoch": 0.5741723527925195,
      "grad_norm": 0.37983378767967224,
      "learning_rate": 2.4285108555160577e-05,
      "loss": 0.6156,
      "step": 71
    },
    {
      "epoch": 0.5822592873388931,
      "grad_norm": 0.3535488545894623,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.5966,
      "step": 72
    },
    {
      "epoch": 0.5903462218852666,
      "grad_norm": 0.5434425473213196,
      "learning_rate": 2.2112381607484417e-05,
      "loss": 0.7458,
      "step": 73
    },
    {
      "epoch": 0.5984331564316401,
      "grad_norm": 0.37971338629722595,
      "learning_rate": 2.1082913860268765e-05,
      "loss": 0.6074,
      "step": 74
    },
    {
      "epoch": 0.6065200909780136,
      "grad_norm": 0.4319310188293457,
      "learning_rate": 2.0092991918301108e-05,
      "loss": 0.5151,
      "step": 75
    },
    {
      "epoch": 0.6065200909780136,
      "eval_loss": 0.6152855753898621,
      "eval_runtime": 3.512,
      "eval_samples_per_second": 14.237,
      "eval_steps_per_second": 3.702,
      "step": 75
    },
    {
      "epoch": 0.6146070255243872,
      "grad_norm": 0.34593966603279114,
      "learning_rate": 1.91437699862843e-05,
      "loss": 0.6562,
      "step": 76
    },
    {
      "epoch": 0.6226939600707607,
      "grad_norm": 0.36323046684265137,
      "learning_rate": 1.8236354814530112e-05,
      "loss": 0.6715,
      "step": 77
    },
    {
      "epoch": 0.6307808946171342,
      "grad_norm": 0.39813435077667236,
      "learning_rate": 1.7371804408538024e-05,
      "loss": 0.7863,
      "step": 78
    },
    {
      "epoch": 0.6388678291635077,
      "grad_norm": 0.34668055176734924,
      "learning_rate": 1.6551126795408016e-05,
      "loss": 0.6792,
      "step": 79
    },
    {
      "epoch": 0.6469547637098813,
      "grad_norm": 0.38668936491012573,
      "learning_rate": 1.577527884852619e-05,
      "loss": 0.8067,
      "step": 80
    },
    {
      "epoch": 0.6550416982562547,
      "grad_norm": 0.38659054040908813,
      "learning_rate": 1.5045165171893116e-05,
      "loss": 0.7092,
      "step": 81
    },
    {
      "epoch": 0.6631286328026282,
      "grad_norm": 0.34319162368774414,
      "learning_rate": 1.4361637045396029e-05,
      "loss": 0.6067,
      "step": 82
    },
    {
      "epoch": 0.6712155673490018,
      "grad_norm": 0.38419049978256226,
      "learning_rate": 1.3725491432254624e-05,
      "loss": 0.7169,
      "step": 83
    },
    {
      "epoch": 0.6793025018953753,
      "grad_norm": 0.3796985149383545,
      "learning_rate": 1.313747004979751e-05,
      "loss": 0.7196,
      "step": 84
    },
    {
      "epoch": 0.6873894364417488,
      "grad_norm": 0.38364750146865845,
      "learning_rate": 1.2598258504653081e-05,
      "loss": 0.6701,
      "step": 85
    },
    {
      "epoch": 0.6954763709881223,
      "grad_norm": 0.4333467185497284,
      "learning_rate": 1.2108485493362765e-05,
      "loss": 0.6451,
      "step": 86
    },
    {
      "epoch": 0.7035633055344959,
      "grad_norm": 0.38475340604782104,
      "learning_rate": 1.1668722069349041e-05,
      "loss": 0.6131,
      "step": 87
    },
    {
      "epoch": 0.7116502400808693,
      "grad_norm": 0.34996670484542847,
      "learning_rate": 1.1279480977092635e-05,
      "loss": 0.5663,
      "step": 88
    },
    {
      "epoch": 0.7197371746272428,
      "grad_norm": 0.3536860942840576,
      "learning_rate": 1.094121605429547e-05,
      "loss": 0.6976,
      "step": 89
    },
    {
      "epoch": 0.7278241091736164,
      "grad_norm": 0.36633411049842834,
      "learning_rate": 1.0654321702726141e-05,
      "loss": 0.6998,
      "step": 90
    },
    {
      "epoch": 0.7359110437199899,
      "grad_norm": 0.37283647060394287,
      "learning_rate": 1.0419132428365116e-05,
      "loss": 0.6256,
      "step": 91
    },
    {
      "epoch": 0.7439979782663634,
      "grad_norm": 0.35322660207748413,
      "learning_rate": 1.0235922451385733e-05,
      "loss": 0.6615,
      "step": 92
    },
    {
      "epoch": 0.7520849128127369,
      "grad_norm": 0.41877758502960205,
      "learning_rate": 1.0104905386425733e-05,
      "loss": 0.7689,
      "step": 93
    },
    {
      "epoch": 0.7601718473591105,
      "grad_norm": 0.3584843575954437,
      "learning_rate": 1.002623399352217e-05,
      "loss": 0.6636,
      "step": 94
    },
    {
      "epoch": 0.7682587819054839,
      "grad_norm": 0.3423212170600891,
      "learning_rate": 1e-05,
      "loss": 0.5993,
      "step": 95
    }
  ],
  "logging_steps": 1,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1339358872902042e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}