{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.321127579192096,
"eval_steps": 1000000,
"global_step": 64148,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07265329845975008,
"grad_norm": 0.39395979046821594,
"learning_rate": 9.992734670154026e-06,
"loss": 8.1621,
"step": 500
},
{
"epoch": 0.14530659691950015,
"grad_norm": 0.5780870318412781,
"learning_rate": 9.985469340308051e-06,
"loss": 7.0196,
"step": 1000
},
{
"epoch": 0.21795989537925023,
"grad_norm": 0.5540204048156738,
"learning_rate": 9.978204010462076e-06,
"loss": 6.6142,
"step": 1500
},
{
"epoch": 0.2906131938390003,
"grad_norm": 0.5443109273910522,
"learning_rate": 9.970938680616102e-06,
"loss": 6.3813,
"step": 2000
},
{
"epoch": 0.36326649229875035,
"grad_norm": 0.6196579337120056,
"learning_rate": 9.963673350770125e-06,
"loss": 6.2207,
"step": 2500
},
{
"epoch": 0.43591979075850046,
"grad_norm": 0.5789604187011719,
"learning_rate": 9.95640802092415e-06,
"loss": 6.0957,
"step": 3000
},
{
"epoch": 0.5085730892182505,
"grad_norm": 0.63157719373703,
"learning_rate": 9.949142691078175e-06,
"loss": 5.9962,
"step": 3500
},
{
"epoch": 0.5812263876780006,
"grad_norm": 0.5619252920150757,
"learning_rate": 9.9418773612322e-06,
"loss": 5.9116,
"step": 4000
},
{
"epoch": 0.6538796861377506,
"grad_norm": 0.6025997996330261,
"learning_rate": 9.934612031386226e-06,
"loss": 5.8332,
"step": 4500
},
{
"epoch": 0.7265329845975007,
"grad_norm": 0.6860383152961731,
"learning_rate": 9.927346701540251e-06,
"loss": 5.7695,
"step": 5000
},
{
"epoch": 0.7991862830572508,
"grad_norm": 0.7735297679901123,
"learning_rate": 9.920081371694276e-06,
"loss": 5.7068,
"step": 5500
},
{
"epoch": 0.8718395815170009,
"grad_norm": 0.6047775745391846,
"learning_rate": 9.912816041848301e-06,
"loss": 5.65,
"step": 6000
},
{
"epoch": 0.9444928799767509,
"grad_norm": 0.8214231133460999,
"learning_rate": 9.905550712002325e-06,
"loss": 5.6052,
"step": 6500
},
{
"epoch": 1.017146178436501,
"grad_norm": 0.8017473816871643,
"learning_rate": 9.89828538215635e-06,
"loss": 5.5515,
"step": 7000
},
{
"epoch": 1.0897994768962511,
"grad_norm": 0.7803598642349243,
"learning_rate": 9.891020052310377e-06,
"loss": 5.5058,
"step": 7500
},
{
"epoch": 1.1624527753560012,
"grad_norm": 0.7102193236351013,
"learning_rate": 9.8837547224644e-06,
"loss": 5.4684,
"step": 8000
},
{
"epoch": 1.2351060738157513,
"grad_norm": 0.7121894359588623,
"learning_rate": 9.876489392618425e-06,
"loss": 5.4271,
"step": 8500
},
{
"epoch": 1.3077593722755014,
"grad_norm": 0.7547861337661743,
"learning_rate": 9.86922406277245e-06,
"loss": 5.397,
"step": 9000
},
{
"epoch": 1.3804126707352513,
"grad_norm": 0.7697557210922241,
"learning_rate": 9.861958732926476e-06,
"loss": 5.3563,
"step": 9500
},
{
"epoch": 1.4530659691950014,
"grad_norm": 0.8195068836212158,
"learning_rate": 9.854693403080501e-06,
"loss": 5.3228,
"step": 10000
},
{
"epoch": 1.5257192676547515,
"grad_norm": 0.7637478113174438,
"learning_rate": 9.847428073234524e-06,
"loss": 5.2946,
"step": 10500
},
{
"epoch": 1.5983725661145016,
"grad_norm": 0.8972837328910828,
"learning_rate": 9.840162743388551e-06,
"loss": 5.2588,
"step": 11000
},
{
"epoch": 1.6710258645742515,
"grad_norm": 0.9061026573181152,
"learning_rate": 9.832897413542576e-06,
"loss": 5.2321,
"step": 11500
},
{
"epoch": 1.7436791630340016,
"grad_norm": 0.7589295506477356,
"learning_rate": 9.8256320836966e-06,
"loss": 5.2065,
"step": 12000
},
{
"epoch": 1.8163324614937517,
"grad_norm": 0.8363276720046997,
"learning_rate": 9.818366753850625e-06,
"loss": 5.1819,
"step": 12500
},
{
"epoch": 1.8889857599535018,
"grad_norm": 0.8336675763130188,
"learning_rate": 9.81110142400465e-06,
"loss": 5.1575,
"step": 13000
},
{
"epoch": 1.961639058413252,
"grad_norm": 0.8027577996253967,
"learning_rate": 9.803836094158675e-06,
"loss": 5.1314,
"step": 13500
},
{
"epoch": 2.034292356873002,
"grad_norm": 0.7707503437995911,
"learning_rate": 9.7965707643127e-06,
"loss": 5.1088,
"step": 14000
},
{
"epoch": 2.106945655332752,
"grad_norm": 0.8998398184776306,
"learning_rate": 9.789305434466726e-06,
"loss": 5.0808,
"step": 14500
},
{
"epoch": 2.1795989537925022,
"grad_norm": 0.8320822715759277,
"learning_rate": 9.782040104620751e-06,
"loss": 5.0634,
"step": 15000
},
{
"epoch": 2.2522522522522523,
"grad_norm": 0.8281899094581604,
"learning_rate": 9.774774774774776e-06,
"loss": 5.0444,
"step": 15500
},
{
"epoch": 2.3249055507120024,
"grad_norm": 0.8026254177093506,
"learning_rate": 9.767509444928801e-06,
"loss": 5.0179,
"step": 16000
},
{
"epoch": 2.3975588491717525,
"grad_norm": 0.776054322719574,
"learning_rate": 9.760244115082825e-06,
"loss": 5.0009,
"step": 16500
},
{
"epoch": 2.4702121476315027,
"grad_norm": 0.8994656801223755,
"learning_rate": 9.75297878523685e-06,
"loss": 4.9812,
"step": 17000
},
{
"epoch": 2.5428654460912528,
"grad_norm": 0.8583637475967407,
"learning_rate": 9.745713455390875e-06,
"loss": 4.9628,
"step": 17500
},
{
"epoch": 2.615518744551003,
"grad_norm": 0.893622636795044,
"learning_rate": 9.7384481255449e-06,
"loss": 4.9464,
"step": 18000
},
{
"epoch": 2.688172043010753,
"grad_norm": 0.8372634053230286,
"learning_rate": 9.731182795698925e-06,
"loss": 4.9304,
"step": 18500
},
{
"epoch": 2.7608253414705026,
"grad_norm": 0.9035200476646423,
"learning_rate": 9.72391746585295e-06,
"loss": 4.9127,
"step": 19000
},
{
"epoch": 2.8334786399302527,
"grad_norm": 0.8660902380943298,
"learning_rate": 9.716652136006976e-06,
"loss": 4.8998,
"step": 19500
},
{
"epoch": 2.906131938390003,
"grad_norm": 0.9463688731193542,
"learning_rate": 9.709386806161001e-06,
"loss": 4.8778,
"step": 20000
},
{
"epoch": 2.978785236849753,
"grad_norm": 0.8671707510948181,
"learning_rate": 9.702121476315024e-06,
"loss": 4.8658,
"step": 20500
},
{
"epoch": 3.051438535309503,
"grad_norm": 0.9653003215789795,
"learning_rate": 9.69485614646905e-06,
"loss": 4.853,
"step": 21000
},
{
"epoch": 3.124091833769253,
"grad_norm": 0.8984787464141846,
"learning_rate": 9.687590816623077e-06,
"loss": 4.8364,
"step": 21500
},
{
"epoch": 3.1967451322290033,
"grad_norm": 0.8803947567939758,
"learning_rate": 9.6803254867771e-06,
"loss": 4.8177,
"step": 22000
},
{
"epoch": 3.2693984306887534,
"grad_norm": 0.8390781283378601,
"learning_rate": 9.673060156931125e-06,
"loss": 4.8077,
"step": 22500
},
{
"epoch": 3.3420517291485035,
"grad_norm": 0.9399817585945129,
"learning_rate": 9.66579482708515e-06,
"loss": 4.7942,
"step": 23000
},
{
"epoch": 3.4147050276082536,
"grad_norm": 0.864647626876831,
"learning_rate": 9.658529497239176e-06,
"loss": 4.7819,
"step": 23500
},
{
"epoch": 3.4873583260680037,
"grad_norm": 0.863230288028717,
"learning_rate": 9.6512641673932e-06,
"loss": 4.7675,
"step": 24000
},
{
"epoch": 3.5600116245277533,
"grad_norm": 0.9053711891174316,
"learning_rate": 9.643998837547224e-06,
"loss": 4.7562,
"step": 24500
},
{
"epoch": 3.6326649229875034,
"grad_norm": 0.9143590331077576,
"learning_rate": 9.636733507701251e-06,
"loss": 4.7408,
"step": 25000
},
{
"epoch": 3.7053182214472535,
"grad_norm": 0.8819567561149597,
"learning_rate": 9.629468177855276e-06,
"loss": 4.7347,
"step": 25500
},
{
"epoch": 3.7779715199070036,
"grad_norm": 0.9433513283729553,
"learning_rate": 9.6222028480093e-06,
"loss": 4.7227,
"step": 26000
},
{
"epoch": 3.8506248183667537,
"grad_norm": 0.9881005883216858,
"learning_rate": 9.614937518163325e-06,
"loss": 4.7096,
"step": 26500
},
{
"epoch": 3.923278116826504,
"grad_norm": 0.890139639377594,
"learning_rate": 9.60767218831735e-06,
"loss": 4.7,
"step": 27000
},
{
"epoch": 3.995931415286254,
"grad_norm": 0.9619746804237366,
"learning_rate": 9.600406858471375e-06,
"loss": 4.6879,
"step": 27500
},
{
"epoch": 4.068584713746004,
"grad_norm": 0.8994712233543396,
"learning_rate": 9.5931415286254e-06,
"loss": 4.6762,
"step": 28000
},
{
"epoch": 4.141238012205754,
"grad_norm": 0.9636611938476562,
"learning_rate": 9.585876198779426e-06,
"loss": 4.6643,
"step": 28500
},
{
"epoch": 4.213891310665504,
"grad_norm": 0.8346700072288513,
"learning_rate": 9.57861086893345e-06,
"loss": 4.6541,
"step": 29000
},
{
"epoch": 4.286544609125254,
"grad_norm": 0.8618379831314087,
"learning_rate": 9.571345539087476e-06,
"loss": 4.6473,
"step": 29500
},
{
"epoch": 4.3591979075850045,
"grad_norm": 0.9965615272521973,
"learning_rate": 9.564080209241501e-06,
"loss": 4.636,
"step": 30000
},
{
"epoch": 4.431851206044755,
"grad_norm": 0.9595836400985718,
"learning_rate": 9.556814879395525e-06,
"loss": 4.6262,
"step": 30500
},
{
"epoch": 4.504504504504505,
"grad_norm": 0.918498694896698,
"learning_rate": 9.54954954954955e-06,
"loss": 4.6139,
"step": 31000
},
{
"epoch": 4.577157802964255,
"grad_norm": 0.9323708415031433,
"learning_rate": 9.542284219703575e-06,
"loss": 4.6138,
"step": 31500
},
{
"epoch": 4.649811101424005,
"grad_norm": 0.9415541291236877,
"learning_rate": 9.5350188898576e-06,
"loss": 4.6008,
"step": 32000
},
{
"epoch": 4.722464399883755,
"grad_norm": 0.9424421787261963,
"learning_rate": 9.527753560011625e-06,
"loss": 4.5893,
"step": 32500
},
{
"epoch": 4.795117698343505,
"grad_norm": 0.942669689655304,
"learning_rate": 9.52048823016565e-06,
"loss": 4.5866,
"step": 33000
},
{
"epoch": 4.867770996803255,
"grad_norm": 0.9807618856430054,
"learning_rate": 9.513222900319676e-06,
"loss": 4.5736,
"step": 33500
},
{
"epoch": 4.940424295263005,
"grad_norm": 0.960895299911499,
"learning_rate": 9.5059575704737e-06,
"loss": 4.5619,
"step": 34000
},
{
"epoch": 5.013077593722755,
"grad_norm": 0.9801125526428223,
"learning_rate": 9.498692240627724e-06,
"loss": 4.5551,
"step": 34500
},
{
"epoch": 5.0857308921825055,
"grad_norm": 0.9244375228881836,
"learning_rate": 9.49142691078175e-06,
"loss": 4.5408,
"step": 35000
},
{
"epoch": 5.158384190642255,
"grad_norm": 0.9349271059036255,
"learning_rate": 9.484161580935776e-06,
"loss": 4.5358,
"step": 35500
},
{
"epoch": 5.231037489102005,
"grad_norm": 0.9496700763702393,
"learning_rate": 9.4768962510898e-06,
"loss": 4.5284,
"step": 36000
},
{
"epoch": 5.303690787561755,
"grad_norm": 0.8824469447135925,
"learning_rate": 9.469630921243825e-06,
"loss": 4.5225,
"step": 36500
},
{
"epoch": 5.376344086021505,
"grad_norm": 0.9746178984642029,
"learning_rate": 9.46236559139785e-06,
"loss": 4.5084,
"step": 37000
},
{
"epoch": 5.448997384481255,
"grad_norm": 0.9742151498794556,
"learning_rate": 9.455100261551875e-06,
"loss": 4.5036,
"step": 37500
},
{
"epoch": 5.521650682941005,
"grad_norm": 1.0122525691986084,
"learning_rate": 9.4478349317059e-06,
"loss": 4.4951,
"step": 38000
},
{
"epoch": 5.594303981400755,
"grad_norm": 0.9454242587089539,
"learning_rate": 9.440569601859924e-06,
"loss": 4.4844,
"step": 38500
},
{
"epoch": 5.6669572798605055,
"grad_norm": 0.9594370722770691,
"learning_rate": 9.43330427201395e-06,
"loss": 4.4776,
"step": 39000
},
{
"epoch": 5.739610578320256,
"grad_norm": 0.9644564986228943,
"learning_rate": 9.426038942167976e-06,
"loss": 4.4748,
"step": 39500
},
{
"epoch": 5.812263876780006,
"grad_norm": 0.9564418792724609,
"learning_rate": 9.418773612322e-06,
"loss": 4.4592,
"step": 40000
},
{
"epoch": 5.884917175239756,
"grad_norm": 0.9269986152648926,
"learning_rate": 9.411508282476025e-06,
"loss": 4.455,
"step": 40500
},
{
"epoch": 5.957570473699506,
"grad_norm": 0.9712278842926025,
"learning_rate": 9.40424295263005e-06,
"loss": 4.4455,
"step": 41000
},
{
"epoch": 6.030223772159256,
"grad_norm": 0.9372689723968506,
"learning_rate": 9.396977622784075e-06,
"loss": 4.4318,
"step": 41500
},
{
"epoch": 6.102877070619006,
"grad_norm": 0.942115843296051,
"learning_rate": 9.3897122929381e-06,
"loss": 4.4264,
"step": 42000
},
{
"epoch": 6.175530369078756,
"grad_norm": 1.0541362762451172,
"learning_rate": 9.382446963092125e-06,
"loss": 4.4163,
"step": 42500
},
{
"epoch": 6.248183667538506,
"grad_norm": 0.9615015387535095,
"learning_rate": 9.37518163324615e-06,
"loss": 4.4096,
"step": 43000
},
{
"epoch": 6.320836965998256,
"grad_norm": 1.0179786682128906,
"learning_rate": 9.367916303400176e-06,
"loss": 4.4034,
"step": 43500
},
{
"epoch": 6.3934902644580065,
"grad_norm": 0.9816853404045105,
"learning_rate": 9.360650973554201e-06,
"loss": 4.3956,
"step": 44000
},
{
"epoch": 6.466143562917757,
"grad_norm": 1.0108842849731445,
"learning_rate": 9.353385643708224e-06,
"loss": 4.3811,
"step": 44500
},
{
"epoch": 6.538796861377507,
"grad_norm": 1.0273536443710327,
"learning_rate": 9.34612031386225e-06,
"loss": 4.3748,
"step": 45000
},
{
"epoch": 6.611450159837257,
"grad_norm": 0.9984000325202942,
"learning_rate": 9.338854984016275e-06,
"loss": 4.3711,
"step": 45500
},
{
"epoch": 6.684103458297007,
"grad_norm": 0.9320149421691895,
"learning_rate": 9.3315896541703e-06,
"loss": 4.3621,
"step": 46000
},
{
"epoch": 6.756756756756757,
"grad_norm": 1.1042180061340332,
"learning_rate": 9.324324324324325e-06,
"loss": 4.3516,
"step": 46500
},
{
"epoch": 6.829410055216507,
"grad_norm": 0.9890114665031433,
"learning_rate": 9.31705899447835e-06,
"loss": 4.3464,
"step": 47000
},
{
"epoch": 6.902063353676257,
"grad_norm": 1.034157156944275,
"learning_rate": 9.309793664632375e-06,
"loss": 4.3374,
"step": 47500
},
{
"epoch": 6.974716652136007,
"grad_norm": 0.9817051887512207,
"learning_rate": 9.3025283347864e-06,
"loss": 4.3284,
"step": 48000
},
{
"epoch": 7.047369950595757,
"grad_norm": 1.0558934211730957,
"learning_rate": 9.295263004940424e-06,
"loss": 4.3154,
"step": 48500
},
{
"epoch": 7.1200232490555075,
"grad_norm": 0.9428159594535828,
"learning_rate": 9.28799767509445e-06,
"loss": 4.3088,
"step": 49000
},
{
"epoch": 7.192676547515257,
"grad_norm": 1.0073195695877075,
"learning_rate": 9.280732345248476e-06,
"loss": 4.2983,
"step": 49500
},
{
"epoch": 7.265329845975007,
"grad_norm": 0.9252042174339294,
"learning_rate": 9.2734670154025e-06,
"loss": 4.2912,
"step": 50000
},
{
"epoch": 7.337983144434757,
"grad_norm": 1.0058454275131226,
"learning_rate": 9.266201685556525e-06,
"loss": 4.2814,
"step": 50500
},
{
"epoch": 7.410636442894507,
"grad_norm": 1.0393654108047485,
"learning_rate": 9.25893635571055e-06,
"loss": 4.2693,
"step": 51000
},
{
"epoch": 7.483289741354257,
"grad_norm": 0.9429093599319458,
"learning_rate": 9.251671025864575e-06,
"loss": 4.2604,
"step": 51500
},
{
"epoch": 7.555943039814007,
"grad_norm": 0.9522086381912231,
"learning_rate": 9.2444056960186e-06,
"loss": 4.2499,
"step": 52000
},
{
"epoch": 7.628596338273757,
"grad_norm": 0.9854516386985779,
"learning_rate": 9.237140366172624e-06,
"loss": 4.2411,
"step": 52500
},
{
"epoch": 7.7012496367335075,
"grad_norm": 0.9321109056472778,
"learning_rate": 9.22987503632665e-06,
"loss": 4.2257,
"step": 53000
},
{
"epoch": 7.773902935193258,
"grad_norm": 0.9426267147064209,
"learning_rate": 9.222609706480676e-06,
"loss": 4.2225,
"step": 53500
},
{
"epoch": 7.846556233653008,
"grad_norm": 0.9784730672836304,
"learning_rate": 9.2153443766347e-06,
"loss": 4.2077,
"step": 54000
},
{
"epoch": 7.919209532112758,
"grad_norm": 0.9224970936775208,
"learning_rate": 9.208079046788724e-06,
"loss": 4.2029,
"step": 54500
},
{
"epoch": 7.991862830572508,
"grad_norm": 0.9461036920547485,
"learning_rate": 9.20081371694275e-06,
"loss": 4.1932,
"step": 55000
},
{
"epoch": 8.064516129032258,
"grad_norm": 0.9398289322853088,
"learning_rate": 9.193548387096775e-06,
"loss": 4.182,
"step": 55500
},
{
"epoch": 8.137169427492008,
"grad_norm": 0.9409332275390625,
"learning_rate": 9.1862830572508e-06,
"loss": 4.1696,
"step": 56000
},
{
"epoch": 8.209822725951758,
"grad_norm": 0.9343535304069519,
"learning_rate": 9.179017727404825e-06,
"loss": 4.1627,
"step": 56500
},
{
"epoch": 8.282476024411508,
"grad_norm": 0.96622234582901,
"learning_rate": 9.17175239755885e-06,
"loss": 4.1569,
"step": 57000
},
{
"epoch": 8.355129322871258,
"grad_norm": 0.9742193818092346,
"learning_rate": 9.164487067712876e-06,
"loss": 4.1494,
"step": 57500
},
{
"epoch": 8.427782621331009,
"grad_norm": 0.9126195907592773,
"learning_rate": 9.1572217378669e-06,
"loss": 4.1413,
"step": 58000
},
{
"epoch": 8.500435919790759,
"grad_norm": 0.978380024433136,
"learning_rate": 9.149956408020924e-06,
"loss": 4.1355,
"step": 58500
},
{
"epoch": 8.573089218250509,
"grad_norm": 0.943013608455658,
"learning_rate": 9.14269107817495e-06,
"loss": 4.1277,
"step": 59000
},
{
"epoch": 8.645742516710259,
"grad_norm": 0.9417549967765808,
"learning_rate": 9.135425748328975e-06,
"loss": 4.1254,
"step": 59500
},
{
"epoch": 8.718395815170009,
"grad_norm": 0.9427275061607361,
"learning_rate": 9.128160418483e-06,
"loss": 4.1183,
"step": 60000
},
{
"epoch": 8.791049113629759,
"grad_norm": 0.972618818283081,
"learning_rate": 9.120895088637025e-06,
"loss": 4.1094,
"step": 60500
},
{
"epoch": 8.86370241208951,
"grad_norm": 0.9143289923667908,
"learning_rate": 9.11362975879105e-06,
"loss": 4.1035,
"step": 61000
},
{
"epoch": 8.93635571054926,
"grad_norm": 0.961862325668335,
"learning_rate": 9.106364428945075e-06,
"loss": 4.1035,
"step": 61500
},
{
"epoch": 9.00900900900901,
"grad_norm": 0.9252375960350037,
"learning_rate": 9.0990990990991e-06,
"loss": 4.0923,
"step": 62000
},
{
"epoch": 9.08166230746876,
"grad_norm": 0.9321468472480774,
"learning_rate": 9.091833769253124e-06,
"loss": 4.0855,
"step": 62500
},
{
"epoch": 9.15431560592851,
"grad_norm": 0.9290264844894409,
"learning_rate": 9.084568439407149e-06,
"loss": 4.0847,
"step": 63000
},
{
"epoch": 9.22696890438826,
"grad_norm": 1.0008461475372314,
"learning_rate": 9.077303109561176e-06,
"loss": 4.0791,
"step": 63500
},
{
"epoch": 9.29962220284801,
"grad_norm": 1.0144729614257812,
"learning_rate": 9.0700377797152e-06,
"loss": 4.0724,
"step": 64000
},
{
"epoch": 9.321127579192096,
"step": 64148,
"total_flos": 5.363892569191219e+17,
"train_loss": 4.763968430426513,
"train_runtime": 93599.1484,
"train_samples_per_second": 705.822,
"train_steps_per_second": 7.353
}
],
"logging_steps": 500,
"max_steps": 688200,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 1000000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.363892569191219e+17,
"train_batch_size": 96,
"trial_name": null,
"trial_params": null
}