{
"best_metric": 10.496562957763672,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.06595218466611706,
"eval_steps": 25,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006595218466611707,
"grad_norm": 5.548171520233154,
"learning_rate": 2.9999999999999997e-05,
"loss": 44.5605,
"step": 1
},
{
"epoch": 0.0006595218466611707,
"eval_loss": 11.123749732971191,
"eval_runtime": 0.5337,
"eval_samples_per_second": 93.678,
"eval_steps_per_second": 13.115,
"step": 1
},
{
"epoch": 0.0013190436933223414,
"grad_norm": 5.983486175537109,
"learning_rate": 5.9999999999999995e-05,
"loss": 44.5645,
"step": 2
},
{
"epoch": 0.001978565539983512,
"grad_norm": 5.650338172912598,
"learning_rate": 8.999999999999999e-05,
"loss": 44.5039,
"step": 3
},
{
"epoch": 0.0026380873866446827,
"grad_norm": 5.475784778594971,
"learning_rate": 0.00011999999999999999,
"loss": 44.5117,
"step": 4
},
{
"epoch": 0.0032976092333058533,
"grad_norm": 5.486029148101807,
"learning_rate": 0.00015,
"loss": 44.4688,
"step": 5
},
{
"epoch": 0.003957131079967024,
"grad_norm": 5.552286148071289,
"learning_rate": 0.00017999999999999998,
"loss": 44.4121,
"step": 6
},
{
"epoch": 0.004616652926628195,
"grad_norm": 5.922216415405273,
"learning_rate": 0.00020999999999999998,
"loss": 44.3496,
"step": 7
},
{
"epoch": 0.0052761747732893655,
"grad_norm": 5.640804767608643,
"learning_rate": 0.00023999999999999998,
"loss": 44.2832,
"step": 8
},
{
"epoch": 0.005935696619950536,
"grad_norm": 5.632114887237549,
"learning_rate": 0.00027,
"loss": 44.2227,
"step": 9
},
{
"epoch": 0.006595218466611707,
"grad_norm": 5.427684307098389,
"learning_rate": 0.0003,
"loss": 44.1895,
"step": 10
},
{
"epoch": 0.007254740313272877,
"grad_norm": 5.3626389503479,
"learning_rate": 0.0002999794957488703,
"loss": 44.1113,
"step": 11
},
{
"epoch": 0.007914262159934049,
"grad_norm": 5.239843368530273,
"learning_rate": 0.0002999179886011389,
"loss": 43.9922,
"step": 12
},
{
"epoch": 0.008573784006595218,
"grad_norm": 5.086913585662842,
"learning_rate": 0.0002998154953722457,
"loss": 43.8848,
"step": 13
},
{
"epoch": 0.00923330585325639,
"grad_norm": 5.158645153045654,
"learning_rate": 0.00029967204408281613,
"loss": 43.7676,
"step": 14
},
{
"epoch": 0.00989282769991756,
"grad_norm": 4.871565341949463,
"learning_rate": 0.00029948767395100045,
"loss": 43.7051,
"step": 15
},
{
"epoch": 0.010552349546578731,
"grad_norm": 4.838162422180176,
"learning_rate": 0.0002992624353817517,
"loss": 43.6191,
"step": 16
},
{
"epoch": 0.0112118713932399,
"grad_norm": 4.602661609649658,
"learning_rate": 0.0002989963899530457,
"loss": 43.5488,
"step": 17
},
{
"epoch": 0.011871393239901072,
"grad_norm": 4.472682476043701,
"learning_rate": 0.00029868961039904624,
"loss": 43.4824,
"step": 18
},
{
"epoch": 0.012530915086562242,
"grad_norm": 4.483953952789307,
"learning_rate": 0.00029834218059022024,
"loss": 43.4609,
"step": 19
},
{
"epoch": 0.013190436933223413,
"grad_norm": 4.183454513549805,
"learning_rate": 0.00029795419551040833,
"loss": 43.3125,
"step": 20
},
{
"epoch": 0.013849958779884583,
"grad_norm": 4.2280449867248535,
"learning_rate": 0.00029752576123085736,
"loss": 43.1465,
"step": 21
},
{
"epoch": 0.014509480626545754,
"grad_norm": 3.8879261016845703,
"learning_rate": 0.0002970569948812214,
"loss": 43.2305,
"step": 22
},
{
"epoch": 0.015169002473206924,
"grad_norm": 3.6963751316070557,
"learning_rate": 0.0002965480246175399,
"loss": 43.1582,
"step": 23
},
{
"epoch": 0.015828524319868097,
"grad_norm": 3.540698289871216,
"learning_rate": 0.0002959989895872009,
"loss": 43.0605,
"step": 24
},
{
"epoch": 0.016488046166529265,
"grad_norm": 3.258104085922241,
"learning_rate": 0.0002954100398908995,
"loss": 42.9844,
"step": 25
},
{
"epoch": 0.016488046166529265,
"eval_loss": 10.72249984741211,
"eval_runtime": 0.1131,
"eval_samples_per_second": 441.99,
"eval_steps_per_second": 61.879,
"step": 25
},
{
"epoch": 0.017147568013190437,
"grad_norm": 3.194683074951172,
"learning_rate": 0.0002947813365416023,
"loss": 42.8789,
"step": 26
},
{
"epoch": 0.017807089859851608,
"grad_norm": 3.04011607170105,
"learning_rate": 0.0002941130514205272,
"loss": 42.8496,
"step": 27
},
{
"epoch": 0.01846661170651278,
"grad_norm": 2.8812403678894043,
"learning_rate": 0.0002934053672301536,
"loss": 42.7383,
"step": 28
},
{
"epoch": 0.019126133553173948,
"grad_norm": 2.8334062099456787,
"learning_rate": 0.00029265847744427303,
"loss": 42.791,
"step": 29
},
{
"epoch": 0.01978565539983512,
"grad_norm": 2.608891248703003,
"learning_rate": 0.00029187258625509513,
"loss": 42.7285,
"step": 30
},
{
"epoch": 0.02044517724649629,
"grad_norm": 2.658057451248169,
"learning_rate": 0.00029104790851742417,
"loss": 42.7129,
"step": 31
},
{
"epoch": 0.021104699093157462,
"grad_norm": 2.4135985374450684,
"learning_rate": 0.0002901846696899191,
"loss": 42.6523,
"step": 32
},
{
"epoch": 0.02176422093981863,
"grad_norm": 2.3150999546051025,
"learning_rate": 0.00028928310577345606,
"loss": 42.6055,
"step": 33
},
{
"epoch": 0.0224237427864798,
"grad_norm": 2.3153157234191895,
"learning_rate": 0.0002883434632466077,
"loss": 42.5723,
"step": 34
},
{
"epoch": 0.023083264633140973,
"grad_norm": 2.148899555206299,
"learning_rate": 0.00028736599899825856,
"loss": 42.5176,
"step": 35
},
{
"epoch": 0.023742786479802144,
"grad_norm": 2.181697130203247,
"learning_rate": 0.00028635098025737434,
"loss": 42.5156,
"step": 36
},
{
"epoch": 0.024402308326463316,
"grad_norm": 2.0900909900665283,
"learning_rate": 0.00028529868451994384,
"loss": 42.4102,
"step": 37
},
{
"epoch": 0.025061830173124484,
"grad_norm": 1.945241928100586,
"learning_rate": 0.0002842093994731145,
"loss": 42.4102,
"step": 38
},
{
"epoch": 0.025721352019785655,
"grad_norm": 1.914509892463684,
"learning_rate": 0.00028308342291654174,
"loss": 42.3848,
"step": 39
},
{
"epoch": 0.026380873866446827,
"grad_norm": 2.0310447216033936,
"learning_rate": 0.00028192106268097334,
"loss": 42.3613,
"step": 40
},
{
"epoch": 0.027040395713107998,
"grad_norm": 2.335347890853882,
"learning_rate": 0.00028072263654409154,
"loss": 42.3184,
"step": 41
},
{
"epoch": 0.027699917559769166,
"grad_norm": 1.846063256263733,
"learning_rate": 0.0002794884721436361,
"loss": 42.291,
"step": 42
},
{
"epoch": 0.028359439406430337,
"grad_norm": 1.7599420547485352,
"learning_rate": 0.00027821890688783083,
"loss": 42.2578,
"step": 43
},
{
"epoch": 0.02901896125309151,
"grad_norm": 1.7382268905639648,
"learning_rate": 0.0002769142878631403,
"loss": 42.2266,
"step": 44
},
{
"epoch": 0.02967848309975268,
"grad_norm": 1.5867624282836914,
"learning_rate": 0.00027557497173937923,
"loss": 42.1758,
"step": 45
},
{
"epoch": 0.03033800494641385,
"grad_norm": 1.6499996185302734,
"learning_rate": 0.000274201324672203,
"loss": 42.1758,
"step": 46
},
{
"epoch": 0.03099752679307502,
"grad_norm": 1.8098398447036743,
"learning_rate": 0.00027279372220300385,
"loss": 42.1719,
"step": 47
},
{
"epoch": 0.031657048639736195,
"grad_norm": 1.7325196266174316,
"learning_rate": 0.0002713525491562421,
"loss": 42.1387,
"step": 48
},
{
"epoch": 0.03231657048639736,
"grad_norm": 1.4527415037155151,
"learning_rate": 0.00026987819953423867,
"loss": 42.1289,
"step": 49
},
{
"epoch": 0.03297609233305853,
"grad_norm": 1.7800573110580444,
"learning_rate": 0.00026837107640945905,
"loss": 42.0781,
"step": 50
},
{
"epoch": 0.03297609233305853,
"eval_loss": 10.520312309265137,
"eval_runtime": 0.1132,
"eval_samples_per_second": 441.817,
"eval_steps_per_second": 61.854,
"step": 50
},
{
"epoch": 0.033635614179719706,
"grad_norm": 1.5558362007141113,
"learning_rate": 0.0002668315918143169,
"loss": 42.1289,
"step": 51
},
{
"epoch": 0.034295136026380874,
"grad_norm": 1.4583945274353027,
"learning_rate": 0.00026526016662852886,
"loss": 42.0273,
"step": 52
},
{
"epoch": 0.03495465787304204,
"grad_norm": 1.2193175554275513,
"learning_rate": 0.00026365723046405023,
"loss": 42.0605,
"step": 53
},
{
"epoch": 0.035614179719703216,
"grad_norm": 1.212590217590332,
"learning_rate": 0.0002620232215476231,
"loss": 42.0234,
"step": 54
},
{
"epoch": 0.036273701566364384,
"grad_norm": 0.9861271381378174,
"learning_rate": 0.0002603585866009697,
"loss": 42.0625,
"step": 55
},
{
"epoch": 0.03693322341302556,
"grad_norm": 0.9094260931015015,
"learning_rate": 0.00025866378071866334,
"loss": 42.0195,
"step": 56
},
{
"epoch": 0.03759274525968673,
"grad_norm": 0.9702939391136169,
"learning_rate": 0.00025693926724370956,
"loss": 42.0547,
"step": 57
},
{
"epoch": 0.038252267106347895,
"grad_norm": 0.8315839767456055,
"learning_rate": 0.00025518551764087326,
"loss": 42.0605,
"step": 58
},
{
"epoch": 0.03891178895300907,
"grad_norm": 0.9290674328804016,
"learning_rate": 0.00025340301136778483,
"loss": 42.0391,
"step": 59
},
{
"epoch": 0.03957131079967024,
"grad_norm": 1.1367566585540771,
"learning_rate": 0.00025159223574386114,
"loss": 41.9785,
"step": 60
},
{
"epoch": 0.04023083264633141,
"grad_norm": 0.842949390411377,
"learning_rate": 0.0002497536858170772,
"loss": 42.0508,
"step": 61
},
{
"epoch": 0.04089035449299258,
"grad_norm": 0.7557629346847534,
"learning_rate": 0.00024788786422862526,
"loss": 42.0332,
"step": 62
},
{
"epoch": 0.04154987633965375,
"grad_norm": 0.6981768608093262,
"learning_rate": 0.00024599528107549745,
"loss": 42.0215,
"step": 63
},
{
"epoch": 0.042209398186314924,
"grad_norm": 0.6374576687812805,
"learning_rate": 0.00024407645377103054,
"loss": 42.0039,
"step": 64
},
{
"epoch": 0.04286892003297609,
"grad_norm": 0.7283995151519775,
"learning_rate": 0.00024213190690345018,
"loss": 41.998,
"step": 65
},
{
"epoch": 0.04352844187963726,
"grad_norm": 0.7016935348510742,
"learning_rate": 0.00024016217209245374,
"loss": 41.9688,
"step": 66
},
{
"epoch": 0.044187963726298435,
"grad_norm": 0.7185855507850647,
"learning_rate": 0.00023816778784387094,
"loss": 41.9727,
"step": 67
},
{
"epoch": 0.0448474855729596,
"grad_norm": 0.5180225372314453,
"learning_rate": 0.0002361492994024415,
"loss": 41.9844,
"step": 68
},
{
"epoch": 0.04550700741962078,
"grad_norm": 0.7438845038414001,
"learning_rate": 0.0002341072586027509,
"loss": 41.959,
"step": 69
},
{
"epoch": 0.046166529266281946,
"grad_norm": 0.6051533818244934,
"learning_rate": 0.00023204222371836405,
"loss": 41.9551,
"step": 70
},
{
"epoch": 0.046826051112943114,
"grad_norm": 0.5988097190856934,
"learning_rate": 0.00022995475930919905,
"loss": 42.0098,
"step": 71
},
{
"epoch": 0.04748557295960429,
"grad_norm": 0.6344655156135559,
"learning_rate": 0.00022784543606718227,
"loss": 42.0039,
"step": 72
},
{
"epoch": 0.04814509480626546,
"grad_norm": 0.6249358057975769,
"learning_rate": 0.00022571483066022657,
"loss": 41.998,
"step": 73
},
{
"epoch": 0.04880461665292663,
"grad_norm": 0.6966153383255005,
"learning_rate": 0.0002235635255745762,
"loss": 41.9434,
"step": 74
},
{
"epoch": 0.0494641384995878,
"grad_norm": 0.6041496396064758,
"learning_rate": 0.00022139210895556104,
"loss": 41.9766,
"step": 75
},
{
"epoch": 0.0494641384995878,
"eval_loss": 10.489999771118164,
"eval_runtime": 0.1147,
"eval_samples_per_second": 436.085,
"eval_steps_per_second": 61.052,
"step": 75
},
{
"epoch": 0.05012366034624897,
"grad_norm": 0.6852412223815918,
"learning_rate": 0.00021920117444680317,
"loss": 42.0117,
"step": 76
},
{
"epoch": 0.05078318219291014,
"grad_norm": 0.43333616852760315,
"learning_rate": 0.00021699132102792097,
"loss": 41.9805,
"step": 77
},
{
"epoch": 0.05144270403957131,
"grad_norm": 0.6226640343666077,
"learning_rate": 0.0002147631528507739,
"loss": 41.9941,
"step": 78
},
{
"epoch": 0.05210222588623248,
"grad_norm": 0.57357257604599,
"learning_rate": 0.00021251727907429355,
"loss": 42.0098,
"step": 79
},
{
"epoch": 0.05276174773289365,
"grad_norm": 0.4521426260471344,
"learning_rate": 0.0002102543136979454,
"loss": 42.0098,
"step": 80
},
{
"epoch": 0.05342126957955482,
"grad_norm": 0.4179379642009735,
"learning_rate": 0.0002079748753938678,
"loss": 41.9688,
"step": 81
},
{
"epoch": 0.054080791426215996,
"grad_norm": 0.421734094619751,
"learning_rate": 0.0002056795873377331,
"loss": 41.9785,
"step": 82
},
{
"epoch": 0.054740313272877164,
"grad_norm": 0.3774226903915405,
"learning_rate": 0.00020336907703837748,
"loss": 41.9785,
"step": 83
},
{
"epoch": 0.05539983511953833,
"grad_norm": 0.3708513379096985,
"learning_rate": 0.00020104397616624645,
"loss": 41.9727,
"step": 84
},
{
"epoch": 0.05605935696619951,
"grad_norm": 0.42900216579437256,
"learning_rate": 0.00019870492038070252,
"loss": 41.957,
"step": 85
},
{
"epoch": 0.056718878812860675,
"grad_norm": 0.46859726309776306,
"learning_rate": 0.0001963525491562421,
"loss": 41.9707,
"step": 86
},
{
"epoch": 0.05737840065952185,
"grad_norm": 0.34871360659599304,
"learning_rate": 0.0001939875056076697,
"loss": 41.9727,
"step": 87
},
{
"epoch": 0.05803792250618302,
"grad_norm": 0.3781905472278595,
"learning_rate": 0.00019161043631427666,
"loss": 41.9902,
"step": 88
},
{
"epoch": 0.058697444352844186,
"grad_norm": 0.6564770936965942,
"learning_rate": 0.00018922199114307294,
"loss": 41.9258,
"step": 89
},
{
"epoch": 0.05935696619950536,
"grad_norm": 0.7199106812477112,
"learning_rate": 0.00018682282307111987,
"loss": 41.9258,
"step": 90
},
{
"epoch": 0.06001648804616653,
"grad_norm": 0.3589068651199341,
"learning_rate": 0.00018441358800701273,
"loss": 41.9805,
"step": 91
},
{
"epoch": 0.0606760098928277,
"grad_norm": 0.4372813403606415,
"learning_rate": 0.00018199494461156203,
"loss": 41.998,
"step": 92
},
{
"epoch": 0.06133553173948887,
"grad_norm": 0.4405193626880646,
"learning_rate": 0.000179567554117722,
"loss": 41.9453,
"step": 93
},
{
"epoch": 0.06199505358615004,
"grad_norm": 0.7273338437080383,
"learning_rate": 0.00017713208014981648,
"loss": 41.9199,
"step": 94
},
{
"epoch": 0.06265457543281121,
"grad_norm": 0.4150626063346863,
"learning_rate": 0.00017468918854211007,
"loss": 41.9609,
"step": 95
},
{
"epoch": 0.06331409727947239,
"grad_norm": 0.4945806860923767,
"learning_rate": 0.00017223954715677627,
"loss": 41.9844,
"step": 96
},
{
"epoch": 0.06397361912613356,
"grad_norm": 0.36564674973487854,
"learning_rate": 0.00016978382570131034,
"loss": 41.9512,
"step": 97
},
{
"epoch": 0.06463314097279473,
"grad_norm": 0.632477879524231,
"learning_rate": 0.00016732269554543794,
"loss": 41.918,
"step": 98
},
{
"epoch": 0.0652926628194559,
"grad_norm": 0.24244551360607147,
"learning_rate": 0.00016485682953756942,
"loss": 41.9531,
"step": 99
},
{
"epoch": 0.06595218466611706,
"grad_norm": 0.39558830857276917,
"learning_rate": 0.00016238690182084986,
"loss": 41.9551,
"step": 100
},
{
"epoch": 0.06595218466611706,
"eval_loss": 10.496562957763672,
"eval_runtime": 0.1143,
"eval_samples_per_second": 437.308,
"eval_steps_per_second": 61.223,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 795475968000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}