{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9884169884169884,
"eval_steps": 500,
"global_step": 258,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007722007722007722,
"grad_norm": 3.7684099674224854,
"learning_rate": 0.0,
"loss": 1.1103,
"num_input_tokens_seen": 45712,
"step": 1
},
{
"epoch": 0.015444015444015444,
"grad_norm": 3.7689733505249023,
"learning_rate": 2.5e-06,
"loss": 1.0548,
"num_input_tokens_seen": 90856,
"step": 2
},
{
"epoch": 0.023166023166023165,
"grad_norm": 3.066856622695923,
"learning_rate": 5e-06,
"loss": 0.9604,
"num_input_tokens_seen": 141160,
"step": 3
},
{
"epoch": 0.03088803088803089,
"grad_norm": 2.7844741344451904,
"learning_rate": 7.5e-06,
"loss": 1.0064,
"num_input_tokens_seen": 191560,
"step": 4
},
{
"epoch": 0.03861003861003861,
"grad_norm": 1.7969169616699219,
"learning_rate": 1e-05,
"loss": 0.935,
"num_input_tokens_seen": 239792,
"step": 5
},
{
"epoch": 0.04633204633204633,
"grad_norm": 2.561210870742798,
"learning_rate": 1.25e-05,
"loss": 0.9258,
"num_input_tokens_seen": 286776,
"step": 6
},
{
"epoch": 0.05405405405405406,
"grad_norm": 2.740765333175659,
"learning_rate": 1.5e-05,
"loss": 0.9268,
"num_input_tokens_seen": 339256,
"step": 7
},
{
"epoch": 0.06177606177606178,
"grad_norm": 2.7144649028778076,
"learning_rate": 1.75e-05,
"loss": 1.023,
"num_input_tokens_seen": 388616,
"step": 8
},
{
"epoch": 0.0694980694980695,
"grad_norm": 2.0525765419006348,
"learning_rate": 2e-05,
"loss": 0.9466,
"num_input_tokens_seen": 434136,
"step": 9
},
{
"epoch": 0.07722007722007722,
"grad_norm": 1.4776036739349365,
"learning_rate": 2.25e-05,
"loss": 0.8229,
"num_input_tokens_seen": 485424,
"step": 10
},
{
"epoch": 0.08494208494208494,
"grad_norm": 1.4184006452560425,
"learning_rate": 2.5e-05,
"loss": 0.8575,
"num_input_tokens_seen": 532232,
"step": 11
},
{
"epoch": 0.09266409266409266,
"grad_norm": 1.3195043802261353,
"learning_rate": 2.7500000000000004e-05,
"loss": 0.8271,
"num_input_tokens_seen": 575152,
"step": 12
},
{
"epoch": 0.10038610038610038,
"grad_norm": 1.1674644947052002,
"learning_rate": 3e-05,
"loss": 0.7658,
"num_input_tokens_seen": 620992,
"step": 13
},
{
"epoch": 0.10810810810810811,
"grad_norm": 1.2842392921447754,
"learning_rate": 3.2500000000000004e-05,
"loss": 0.8372,
"num_input_tokens_seen": 671288,
"step": 14
},
{
"epoch": 0.11583011583011583,
"grad_norm": 1.2587034702301025,
"learning_rate": 3.5e-05,
"loss": 0.8108,
"num_input_tokens_seen": 722848,
"step": 15
},
{
"epoch": 0.12355212355212356,
"grad_norm": 1.1240705251693726,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.824,
"num_input_tokens_seen": 767632,
"step": 16
},
{
"epoch": 0.13127413127413126,
"grad_norm": 1.2230473756790161,
"learning_rate": 4e-05,
"loss": 0.7958,
"num_input_tokens_seen": 816304,
"step": 17
},
{
"epoch": 0.138996138996139,
"grad_norm": 1.0638173818588257,
"learning_rate": 4.25e-05,
"loss": 0.7784,
"num_input_tokens_seen": 865360,
"step": 18
},
{
"epoch": 0.14671814671814673,
"grad_norm": 0.9111860394477844,
"learning_rate": 4.5e-05,
"loss": 0.7205,
"num_input_tokens_seen": 919800,
"step": 19
},
{
"epoch": 0.15444015444015444,
"grad_norm": 1.1077393293380737,
"learning_rate": 4.75e-05,
"loss": 0.7929,
"num_input_tokens_seen": 965736,
"step": 20
},
{
"epoch": 0.16216216216216217,
"grad_norm": 0.9261448383331299,
"learning_rate": 5e-05,
"loss": 0.7326,
"num_input_tokens_seen": 1017352,
"step": 21
},
{
"epoch": 0.16988416988416988,
"grad_norm": 1.0034143924713135,
"learning_rate": 4.999782204181027e-05,
"loss": 0.749,
"num_input_tokens_seen": 1061344,
"step": 22
},
{
"epoch": 0.1776061776061776,
"grad_norm": 0.9668173789978027,
"learning_rate": 4.999128854672123e-05,
"loss": 0.7499,
"num_input_tokens_seen": 1109672,
"step": 23
},
{
"epoch": 0.18532818532818532,
"grad_norm": 0.8699672818183899,
"learning_rate": 4.9980400653107194e-05,
"loss": 0.6966,
"num_input_tokens_seen": 1159832,
"step": 24
},
{
"epoch": 0.19305019305019305,
"grad_norm": 0.863695502281189,
"learning_rate": 4.9965160258038346e-05,
"loss": 0.7161,
"num_input_tokens_seen": 1209240,
"step": 25
},
{
"epoch": 0.20077220077220076,
"grad_norm": 0.9278308749198914,
"learning_rate": 4.9945570016950135e-05,
"loss": 0.7119,
"num_input_tokens_seen": 1259568,
"step": 26
},
{
"epoch": 0.2084942084942085,
"grad_norm": 1.0021332502365112,
"learning_rate": 4.9921633343180654e-05,
"loss": 0.8077,
"num_input_tokens_seen": 1308136,
"step": 27
},
{
"epoch": 0.21621621621621623,
"grad_norm": 0.9220330715179443,
"learning_rate": 4.989335440737586e-05,
"loss": 0.7465,
"num_input_tokens_seen": 1356408,
"step": 28
},
{
"epoch": 0.22393822393822393,
"grad_norm": 0.9325352907180786,
"learning_rate": 4.986073813676296e-05,
"loss": 0.7342,
"num_input_tokens_seen": 1402072,
"step": 29
},
{
"epoch": 0.23166023166023167,
"grad_norm": 0.9855431914329529,
"learning_rate": 4.9823790214291836e-05,
"loss": 0.7492,
"num_input_tokens_seen": 1454592,
"step": 30
},
{
"epoch": 0.23938223938223938,
"grad_norm": 0.9169183373451233,
"learning_rate": 4.978251707764492e-05,
"loss": 0.7423,
"num_input_tokens_seen": 1500560,
"step": 31
},
{
"epoch": 0.2471042471042471,
"grad_norm": 0.9389194846153259,
"learning_rate": 4.973692591811549e-05,
"loss": 0.7463,
"num_input_tokens_seen": 1546960,
"step": 32
},
{
"epoch": 0.2548262548262548,
"grad_norm": 0.9559301733970642,
"learning_rate": 4.968702467935469e-05,
"loss": 0.7673,
"num_input_tokens_seen": 1598424,
"step": 33
},
{
"epoch": 0.2625482625482625,
"grad_norm": 0.9239445328712463,
"learning_rate": 4.963282205598744e-05,
"loss": 0.6737,
"num_input_tokens_seen": 1643656,
"step": 34
},
{
"epoch": 0.2702702702702703,
"grad_norm": 0.8067498803138733,
"learning_rate": 4.957432749209755e-05,
"loss": 0.68,
"num_input_tokens_seen": 1698008,
"step": 35
},
{
"epoch": 0.277992277992278,
"grad_norm": 0.8876193761825562,
"learning_rate": 4.9511551179582174e-05,
"loss": 0.7391,
"num_input_tokens_seen": 1743856,
"step": 36
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.8399716019630432,
"learning_rate": 4.944450405637602e-05,
"loss": 0.7076,
"num_input_tokens_seen": 1797032,
"step": 37
},
{
"epoch": 0.29343629343629346,
"grad_norm": 0.8189979791641235,
"learning_rate": 4.937319780454559e-05,
"loss": 0.6502,
"num_input_tokens_seen": 1847632,
"step": 38
},
{
"epoch": 0.30115830115830117,
"grad_norm": 0.8758053183555603,
"learning_rate": 4.929764484825369e-05,
"loss": 0.7052,
"num_input_tokens_seen": 1895000,
"step": 39
},
{
"epoch": 0.3088803088803089,
"grad_norm": 0.9134597778320312,
"learning_rate": 4.921785835159472e-05,
"loss": 0.7224,
"num_input_tokens_seen": 1936384,
"step": 40
},
{
"epoch": 0.3166023166023166,
"grad_norm": 0.8602584600448608,
"learning_rate": 4.9133852216300965e-05,
"loss": 0.7023,
"num_input_tokens_seen": 1984248,
"step": 41
},
{
"epoch": 0.32432432432432434,
"grad_norm": 0.8215887546539307,
"learning_rate": 4.9045641079320484e-05,
"loss": 0.7099,
"num_input_tokens_seen": 2029000,
"step": 42
},
{
"epoch": 0.33204633204633205,
"grad_norm": 0.8211591243743896,
"learning_rate": 4.89532403102667e-05,
"loss": 0.7397,
"num_input_tokens_seen": 2078944,
"step": 43
},
{
"epoch": 0.33976833976833976,
"grad_norm": 0.8224607110023499,
"learning_rate": 4.8856666008740583e-05,
"loss": 0.6995,
"num_input_tokens_seen": 2126352,
"step": 44
},
{
"epoch": 0.3474903474903475,
"grad_norm": 0.913007915019989,
"learning_rate": 4.875593500152538e-05,
"loss": 0.6849,
"num_input_tokens_seen": 2172368,
"step": 45
},
{
"epoch": 0.3552123552123552,
"grad_norm": 0.8476090431213379,
"learning_rate": 4.865106483965487e-05,
"loss": 0.7019,
"num_input_tokens_seen": 2217024,
"step": 46
},
{
"epoch": 0.36293436293436293,
"grad_norm": 0.7849616408348083,
"learning_rate": 4.8542073795355294e-05,
"loss": 0.6447,
"num_input_tokens_seen": 2266472,
"step": 47
},
{
"epoch": 0.37065637065637064,
"grad_norm": 0.8142545819282532,
"learning_rate": 4.842898085886164e-05,
"loss": 0.6961,
"num_input_tokens_seen": 2318480,
"step": 48
},
{
"epoch": 0.3783783783783784,
"grad_norm": 0.8318261504173279,
"learning_rate": 4.8311805735108894e-05,
"loss": 0.6864,
"num_input_tokens_seen": 2368432,
"step": 49
},
{
"epoch": 0.3861003861003861,
"grad_norm": 0.8627023696899414,
"learning_rate": 4.81905688402987e-05,
"loss": 0.6599,
"num_input_tokens_seen": 2413104,
"step": 50
},
{
"epoch": 0.3938223938223938,
"grad_norm": 0.8817498087882996,
"learning_rate": 4.806529129834208e-05,
"loss": 0.7159,
"num_input_tokens_seen": 2458232,
"step": 51
},
{
"epoch": 0.4015444015444015,
"grad_norm": 0.8289158940315247,
"learning_rate": 4.793599493717891e-05,
"loss": 0.6649,
"num_input_tokens_seen": 2501480,
"step": 52
},
{
"epoch": 0.4092664092664093,
"grad_norm": 0.794998824596405,
"learning_rate": 4.78027022849747e-05,
"loss": 0.6671,
"num_input_tokens_seen": 2548328,
"step": 53
},
{
"epoch": 0.416988416988417,
"grad_norm": 0.8775593638420105,
"learning_rate": 4.766543656619532e-05,
"loss": 0.6504,
"num_input_tokens_seen": 2592744,
"step": 54
},
{
"epoch": 0.4247104247104247,
"grad_norm": 0.7581058740615845,
"learning_rate": 4.752422169756048e-05,
"loss": 0.611,
"num_input_tokens_seen": 2648832,
"step": 55
},
{
"epoch": 0.43243243243243246,
"grad_norm": 0.8995907306671143,
"learning_rate": 4.7379082283876566e-05,
"loss": 0.6815,
"num_input_tokens_seen": 2695656,
"step": 56
},
{
"epoch": 0.44015444015444016,
"grad_norm": 0.8074026703834534,
"learning_rate": 4.723004361374953e-05,
"loss": 0.6714,
"num_input_tokens_seen": 2748024,
"step": 57
},
{
"epoch": 0.44787644787644787,
"grad_norm": 0.817803680896759,
"learning_rate": 4.707713165517877e-05,
"loss": 0.6611,
"num_input_tokens_seen": 2797448,
"step": 58
},
{
"epoch": 0.4555984555984556,
"grad_norm": 0.8251329660415649,
"learning_rate": 4.692037305103247e-05,
"loss": 0.638,
"num_input_tokens_seen": 2843456,
"step": 59
},
{
"epoch": 0.46332046332046334,
"grad_norm": 0.9807698130607605,
"learning_rate": 4.675979511440549e-05,
"loss": 0.7447,
"num_input_tokens_seen": 2888008,
"step": 60
},
{
"epoch": 0.47104247104247104,
"grad_norm": 0.8173831701278687,
"learning_rate": 4.659542582386041e-05,
"loss": 0.6413,
"num_input_tokens_seen": 2930816,
"step": 61
},
{
"epoch": 0.47876447876447875,
"grad_norm": 0.9126039147377014,
"learning_rate": 4.642729381855262e-05,
"loss": 0.6495,
"num_input_tokens_seen": 2976920,
"step": 62
},
{
"epoch": 0.4864864864864865,
"grad_norm": 0.9136143326759338,
"learning_rate": 4.625542839324036e-05,
"loss": 0.6735,
"num_input_tokens_seen": 3022496,
"step": 63
},
{
"epoch": 0.4942084942084942,
"grad_norm": 0.8095064759254456,
"learning_rate": 4.607985949318047e-05,
"loss": 0.6856,
"num_input_tokens_seen": 3071824,
"step": 64
},
{
"epoch": 0.5019305019305019,
"grad_norm": 0.868986189365387,
"learning_rate": 4.5900617708910854e-05,
"loss": 0.689,
"num_input_tokens_seen": 3119136,
"step": 65
},
{
"epoch": 0.5096525096525096,
"grad_norm": 0.8730259537696838,
"learning_rate": 4.571773427092047e-05,
"loss": 0.685,
"num_input_tokens_seen": 3160840,
"step": 66
},
{
"epoch": 0.5173745173745173,
"grad_norm": 0.7655907869338989,
"learning_rate": 4.553124104420784e-05,
"loss": 0.6211,
"num_input_tokens_seen": 3205472,
"step": 67
},
{
"epoch": 0.525096525096525,
"grad_norm": 0.8272292613983154,
"learning_rate": 4.5341170522729006e-05,
"loss": 0.6785,
"num_input_tokens_seen": 3255480,
"step": 68
},
{
"epoch": 0.5328185328185329,
"grad_norm": 0.7634907960891724,
"learning_rate": 4.514755582373587e-05,
"loss": 0.6477,
"num_input_tokens_seen": 3306024,
"step": 69
},
{
"epoch": 0.5405405405405406,
"grad_norm": 0.8490803241729736,
"learning_rate": 4.4950430682006e-05,
"loss": 0.7086,
"num_input_tokens_seen": 3350976,
"step": 70
},
{
"epoch": 0.5482625482625483,
"grad_norm": 0.7535845637321472,
"learning_rate": 4.4749829443964705e-05,
"loss": 0.6436,
"num_input_tokens_seen": 3400376,
"step": 71
},
{
"epoch": 0.555984555984556,
"grad_norm": 0.8641594052314758,
"learning_rate": 4.454578706170075e-05,
"loss": 0.6926,
"num_input_tokens_seen": 3446016,
"step": 72
},
{
"epoch": 0.5637065637065637,
"grad_norm": 0.8073216080665588,
"learning_rate": 4.433833908687633e-05,
"loss": 0.6917,
"num_input_tokens_seen": 3497296,
"step": 73
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.8128077387809753,
"learning_rate": 4.4127521664532704e-05,
"loss": 0.6608,
"num_input_tokens_seen": 3543456,
"step": 74
},
{
"epoch": 0.5791505791505791,
"grad_norm": 0.8153249621391296,
"learning_rate": 4.39133715267924e-05,
"loss": 0.6712,
"num_input_tokens_seen": 3587352,
"step": 75
},
{
"epoch": 0.5868725868725869,
"grad_norm": 0.748672604560852,
"learning_rate": 4.3695925986459115e-05,
"loss": 0.6644,
"num_input_tokens_seen": 3635888,
"step": 76
},
{
"epoch": 0.5945945945945946,
"grad_norm": 0.8283869028091431,
"learning_rate": 4.347522293051648e-05,
"loss": 0.6168,
"num_input_tokens_seen": 3684128,
"step": 77
},
{
"epoch": 0.6023166023166023,
"grad_norm": 0.8540968894958496,
"learning_rate": 4.3251300813526754e-05,
"loss": 0.687,
"num_input_tokens_seen": 3733704,
"step": 78
},
{
"epoch": 0.61003861003861,
"grad_norm": 0.7499772906303406,
"learning_rate": 4.3024198650930625e-05,
"loss": 0.6009,
"num_input_tokens_seen": 3777744,
"step": 79
},
{
"epoch": 0.6177606177606177,
"grad_norm": 0.7799640893936157,
"learning_rate": 4.279395601224928e-05,
"loss": 0.6559,
"num_input_tokens_seen": 3828288,
"step": 80
},
{
"epoch": 0.6254826254826255,
"grad_norm": 0.7528457045555115,
"learning_rate": 4.2560613014189966e-05,
"loss": 0.6037,
"num_input_tokens_seen": 3875936,
"step": 81
},
{
"epoch": 0.6332046332046332,
"grad_norm": 0.8638529181480408,
"learning_rate": 4.2324210313656176e-05,
"loss": 0.6906,
"num_input_tokens_seen": 3921376,
"step": 82
},
{
"epoch": 0.640926640926641,
"grad_norm": 0.8515468835830688,
"learning_rate": 4.208478910066371e-05,
"loss": 0.6672,
"num_input_tokens_seen": 3968400,
"step": 83
},
{
"epoch": 0.6486486486486487,
"grad_norm": 0.7536445260047913,
"learning_rate": 4.184239109116393e-05,
"loss": 0.6584,
"num_input_tokens_seen": 4016608,
"step": 84
},
{
"epoch": 0.6563706563706564,
"grad_norm": 0.8643118739128113,
"learning_rate": 4.159705851977521e-05,
"loss": 0.6464,
"num_input_tokens_seen": 4064472,
"step": 85
},
{
"epoch": 0.6640926640926641,
"grad_norm": 0.8485602140426636,
"learning_rate": 4.134883413242421e-05,
"loss": 0.6592,
"num_input_tokens_seen": 4114672,
"step": 86
},
{
"epoch": 0.6718146718146718,
"grad_norm": 0.8153643012046814,
"learning_rate": 4.109776117889789e-05,
"loss": 0.5927,
"num_input_tokens_seen": 4158376,
"step": 87
},
{
"epoch": 0.6795366795366795,
"grad_norm": 0.7843388915061951,
"learning_rate": 4.084388340530791e-05,
"loss": 0.6028,
"num_input_tokens_seen": 4207504,
"step": 88
},
{
"epoch": 0.6872586872586872,
"grad_norm": 0.8065616488456726,
"learning_rate": 4.058724504646834e-05,
"loss": 0.6888,
"num_input_tokens_seen": 4259504,
"step": 89
},
{
"epoch": 0.694980694980695,
"grad_norm": 0.8020586371421814,
"learning_rate": 4.032789081818843e-05,
"loss": 0.6555,
"num_input_tokens_seen": 4303104,
"step": 90
},
{
"epoch": 0.7027027027027027,
"grad_norm": 0.7873929738998413,
"learning_rate": 4.0065865909481417e-05,
"loss": 0.6481,
"num_input_tokens_seen": 4352824,
"step": 91
},
{
"epoch": 0.7104247104247104,
"grad_norm": 0.8812729716300964,
"learning_rate": 3.980121597469096e-05,
"loss": 0.631,
"num_input_tokens_seen": 4398424,
"step": 92
},
{
"epoch": 0.7181467181467182,
"grad_norm": 0.748152494430542,
"learning_rate": 3.95339871255365e-05,
"loss": 0.6168,
"num_input_tokens_seen": 4446624,
"step": 93
},
{
"epoch": 0.7258687258687259,
"grad_norm": 0.7249035239219666,
"learning_rate": 3.926422592307888e-05,
"loss": 0.6152,
"num_input_tokens_seen": 4493880,
"step": 94
},
{
"epoch": 0.7335907335907336,
"grad_norm": 0.8587039113044739,
"learning_rate": 3.89919793696077e-05,
"loss": 0.6533,
"num_input_tokens_seen": 4541640,
"step": 95
},
{
"epoch": 0.7413127413127413,
"grad_norm": 0.7924438118934631,
"learning_rate": 3.871729490045185e-05,
"loss": 0.6414,
"num_input_tokens_seen": 4589296,
"step": 96
},
{
"epoch": 0.749034749034749,
"grad_norm": 0.8338213562965393,
"learning_rate": 3.844022037571443e-05,
"loss": 0.71,
"num_input_tokens_seen": 4639848,
"step": 97
},
{
"epoch": 0.7567567567567568,
"grad_norm": 0.7877117991447449,
"learning_rate": 3.81608040719339e-05,
"loss": 0.6575,
"num_input_tokens_seen": 4683808,
"step": 98
},
{
"epoch": 0.7644787644787645,
"grad_norm": 0.7668321132659912,
"learning_rate": 3.78790946736724e-05,
"loss": 0.6277,
"num_input_tokens_seen": 4733296,
"step": 99
},
{
"epoch": 0.7722007722007722,
"grad_norm": 0.8455983996391296,
"learning_rate": 3.759514126503324e-05,
"loss": 0.6586,
"num_input_tokens_seen": 4778416,
"step": 100
},
{
"epoch": 0.7799227799227799,
"grad_norm": 0.757305383682251,
"learning_rate": 3.7308993321108556e-05,
"loss": 0.6535,
"num_input_tokens_seen": 4828584,
"step": 101
},
{
"epoch": 0.7876447876447876,
"grad_norm": 0.7417029142379761,
"learning_rate": 3.702070069935898e-05,
"loss": 0.6324,
"num_input_tokens_seen": 4877424,
"step": 102
},
{
"epoch": 0.7953667953667953,
"grad_norm": 0.7691876292228699,
"learning_rate": 3.673031363092666e-05,
"loss": 0.607,
"num_input_tokens_seen": 4922312,
"step": 103
},
{
"epoch": 0.803088803088803,
"grad_norm": 0.8705837726593018,
"learning_rate": 3.6437882711883084e-05,
"loss": 0.6511,
"num_input_tokens_seen": 4968208,
"step": 104
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.7560796141624451,
"learning_rate": 3.6143458894413465e-05,
"loss": 0.6331,
"num_input_tokens_seen": 5018880,
"step": 105
},
{
"epoch": 0.8185328185328186,
"grad_norm": 0.7419978380203247,
"learning_rate": 3.5847093477938956e-05,
"loss": 0.6489,
"num_input_tokens_seen": 5069312,
"step": 106
},
{
"epoch": 0.8262548262548263,
"grad_norm": 0.7836557626724243,
"learning_rate": 3.554883810017844e-05,
"loss": 0.6151,
"num_input_tokens_seen": 5113280,
"step": 107
},
{
"epoch": 0.833976833976834,
"grad_norm": 0.8063251376152039,
"learning_rate": 3.5248744728151345e-05,
"loss": 0.6362,
"num_input_tokens_seen": 5158416,
"step": 108
},
{
"epoch": 0.8416988416988417,
"grad_norm": 0.7751627564430237,
"learning_rate": 3.494686564912302e-05,
"loss": 0.6611,
"num_input_tokens_seen": 5209288,
"step": 109
},
{
"epoch": 0.8494208494208494,
"grad_norm": 0.7138671278953552,
"learning_rate": 3.464325346149449e-05,
"loss": 0.6323,
"num_input_tokens_seen": 5264480,
"step": 110
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.806349515914917,
"learning_rate": 3.433796106563779e-05,
"loss": 0.6324,
"num_input_tokens_seen": 5308480,
"step": 111
},
{
"epoch": 0.8648648648648649,
"grad_norm": 0.7439104914665222,
"learning_rate": 3.403104165467883e-05,
"loss": 0.62,
"num_input_tokens_seen": 5355096,
"step": 112
},
{
"epoch": 0.8725868725868726,
"grad_norm": 0.7520055770874023,
"learning_rate": 3.3722548705229175e-05,
"loss": 0.6024,
"num_input_tokens_seen": 5401288,
"step": 113
},
{
"epoch": 0.8803088803088803,
"grad_norm": 0.7431480884552002,
"learning_rate": 3.341253596806848e-05,
"loss": 0.6548,
"num_input_tokens_seen": 5453648,
"step": 114
},
{
"epoch": 0.888030888030888,
"grad_norm": 0.7664536833763123,
"learning_rate": 3.310105745877915e-05,
"loss": 0.626,
"num_input_tokens_seen": 5502248,
"step": 115
},
{
"epoch": 0.8957528957528957,
"grad_norm": 0.8179628849029541,
"learning_rate": 3.278816744833479e-05,
"loss": 0.6515,
"num_input_tokens_seen": 5550512,
"step": 116
},
{
"epoch": 0.9034749034749034,
"grad_norm": 0.7596731185913086,
"learning_rate": 3.247392045364426e-05,
"loss": 0.6619,
"num_input_tokens_seen": 5603224,
"step": 117
},
{
"epoch": 0.9111969111969112,
"grad_norm": 0.7250100374221802,
"learning_rate": 3.215837122805282e-05,
"loss": 0.6463,
"num_input_tokens_seen": 5653152,
"step": 118
},
{
"epoch": 0.918918918918919,
"grad_norm": 0.7781504988670349,
"learning_rate": 3.1841574751802076e-05,
"loss": 0.6135,
"num_input_tokens_seen": 5699968,
"step": 119
},
{
"epoch": 0.9266409266409267,
"grad_norm": 0.7662553191184998,
"learning_rate": 3.152358622245042e-05,
"loss": 0.6147,
"num_input_tokens_seen": 5750360,
"step": 120
},
{
"epoch": 0.9343629343629344,
"grad_norm": 0.7741676568984985,
"learning_rate": 3.1204461045255604e-05,
"loss": 0.6018,
"num_input_tokens_seen": 5793216,
"step": 121
},
{
"epoch": 0.9420849420849421,
"grad_norm": 0.7969425916671753,
"learning_rate": 3.088425482352107e-05,
"loss": 0.6716,
"num_input_tokens_seen": 5838920,
"step": 122
},
{
"epoch": 0.9498069498069498,
"grad_norm": 0.7865390181541443,
"learning_rate": 3.056302334890786e-05,
"loss": 0.6123,
"num_input_tokens_seen": 5885064,
"step": 123
},
{
"epoch": 0.9575289575289575,
"grad_norm": 0.7254599332809448,
"learning_rate": 3.024082259171367e-05,
"loss": 0.5959,
"num_input_tokens_seen": 5934624,
"step": 124
},
{
"epoch": 0.9652509652509652,
"grad_norm": 0.7473117709159851,
"learning_rate": 2.9917708691120706e-05,
"loss": 0.6206,
"num_input_tokens_seen": 5987336,
"step": 125
},
{
"epoch": 0.972972972972973,
"grad_norm": 0.7521440982818604,
"learning_rate": 2.9593737945414264e-05,
"loss": 0.6213,
"num_input_tokens_seen": 6035896,
"step": 126
},
{
"epoch": 0.9806949806949807,
"grad_norm": 0.8182458281517029,
"learning_rate": 2.9268966802173436e-05,
"loss": 0.6743,
"num_input_tokens_seen": 6080816,
"step": 127
},
{
"epoch": 0.9884169884169884,
"grad_norm": 0.6886686086654663,
"learning_rate": 2.8943451848435936e-05,
"loss": 0.5528,
"num_input_tokens_seen": 6126984,
"step": 128
},
{
"epoch": 0.9961389961389961,
"grad_norm": 0.764229953289032,
"learning_rate": 2.8617249800838513e-05,
"loss": 0.6412,
"num_input_tokens_seen": 6178704,
"step": 129
},
{
"epoch": 1.0,
"grad_norm": 1.057747483253479,
"learning_rate": 2.8290417495734837e-05,
"loss": 0.6122,
"num_input_tokens_seen": 6201760,
"step": 130
},
{
"epoch": 1.0077220077220077,
"grad_norm": 0.9056001901626587,
"learning_rate": 2.7963011879292573e-05,
"loss": 0.412,
"num_input_tokens_seen": 6246264,
"step": 131
},
{
"epoch": 1.0154440154440154,
"grad_norm": 0.8688384890556335,
"learning_rate": 2.7635089997571196e-05,
"loss": 0.4194,
"num_input_tokens_seen": 6293720,
"step": 132
},
{
"epoch": 1.0231660231660231,
"grad_norm": 0.7179760336875916,
"learning_rate": 2.7306708986582553e-05,
"loss": 0.3895,
"num_input_tokens_seen": 6348992,
"step": 133
},
{
"epoch": 1.0308880308880308,
"grad_norm": 0.7055646181106567,
"learning_rate": 2.6977926062335617e-05,
"loss": 0.3801,
"num_input_tokens_seen": 6396000,
"step": 134
},
{
"epoch": 1.0386100386100385,
"grad_norm": 0.7302276492118835,
"learning_rate": 2.6648798510867386e-05,
"loss": 0.3523,
"num_input_tokens_seen": 6439376,
"step": 135
},
{
"epoch": 1.0463320463320462,
"grad_norm": 0.8147051930427551,
"learning_rate": 2.6319383678261562e-05,
"loss": 0.3759,
"num_input_tokens_seen": 6484320,
"step": 136
},
{
"epoch": 1.054054054054054,
"grad_norm": 1.063301682472229,
"learning_rate": 2.598973896065674e-05,
"loss": 0.3866,
"num_input_tokens_seen": 6531032,
"step": 137
},
{
"epoch": 1.0617760617760619,
"grad_norm": 0.9116234183311462,
"learning_rate": 2.565992179424591e-05,
"loss": 0.3943,
"num_input_tokens_seen": 6581096,
"step": 138
},
{
"epoch": 1.0694980694980696,
"grad_norm": 0.8477054834365845,
"learning_rate": 2.5329989645268977e-05,
"loss": 0.3617,
"num_input_tokens_seen": 6632824,
"step": 139
},
{
"epoch": 1.0772200772200773,
"grad_norm": 0.8234580755233765,
"learning_rate": 2.5e-05,
"loss": 0.3467,
"num_input_tokens_seen": 6681960,
"step": 140
},
{
"epoch": 1.084942084942085,
"grad_norm": 0.8218559622764587,
"learning_rate": 2.467001035473103e-05,
"loss": 0.3829,
"num_input_tokens_seen": 6731624,
"step": 141
},
{
"epoch": 1.0926640926640927,
"grad_norm": 0.8188300132751465,
"learning_rate": 2.43400782057541e-05,
"loss": 0.3873,
"num_input_tokens_seen": 6778008,
"step": 142
},
{
"epoch": 1.1003861003861004,
"grad_norm": 0.734234094619751,
"learning_rate": 2.401026103934327e-05,
"loss": 0.3874,
"num_input_tokens_seen": 6834336,
"step": 143
},
{
"epoch": 1.1081081081081081,
"grad_norm": 0.7244148254394531,
"learning_rate": 2.368061632173844e-05,
"loss": 0.3628,
"num_input_tokens_seen": 6885472,
"step": 144
},
{
"epoch": 1.1158301158301158,
"grad_norm": 0.7350901365280151,
"learning_rate": 2.3351201489132616e-05,
"loss": 0.3552,
"num_input_tokens_seen": 6935216,
"step": 145
},
{
"epoch": 1.1235521235521235,
"grad_norm": 0.7877046465873718,
"learning_rate": 2.3022073937664386e-05,
"loss": 0.3953,
"num_input_tokens_seen": 6981112,
"step": 146
},
{
"epoch": 1.1312741312741312,
"grad_norm": 0.82071453332901,
"learning_rate": 2.2693291013417453e-05,
"loss": 0.425,
"num_input_tokens_seen": 7027848,
"step": 147
},
{
"epoch": 1.138996138996139,
"grad_norm": 0.718041181564331,
"learning_rate": 2.236491000242881e-05,
"loss": 0.3766,
"num_input_tokens_seen": 7079048,
"step": 148
},
{
"epoch": 1.1467181467181466,
"grad_norm": 0.7367174029350281,
"learning_rate": 2.2036988120707436e-05,
"loss": 0.3661,
"num_input_tokens_seen": 7125096,
"step": 149
},
{
"epoch": 1.1544401544401544,
"grad_norm": 0.7686619758605957,
"learning_rate": 2.170958250426517e-05,
"loss": 0.3583,
"num_input_tokens_seen": 7169448,
"step": 150
},
{
"epoch": 1.1621621621621623,
"grad_norm": 0.763224184513092,
"learning_rate": 2.1382750199161496e-05,
"loss": 0.3806,
"num_input_tokens_seen": 7218200,
"step": 151
},
{
"epoch": 1.16988416988417,
"grad_norm": 0.7307192087173462,
"learning_rate": 2.1056548151564063e-05,
"loss": 0.3813,
"num_input_tokens_seen": 7273032,
"step": 152
},
{
"epoch": 1.1776061776061777,
"grad_norm": 0.7209292650222778,
"learning_rate": 2.073103319782656e-05,
"loss": 0.3738,
"num_input_tokens_seen": 7327712,
"step": 153
},
{
"epoch": 1.1853281853281854,
"grad_norm": 0.8867378830909729,
"learning_rate": 2.0406262054585738e-05,
"loss": 0.3732,
"num_input_tokens_seen": 7370416,
"step": 154
},
{
"epoch": 1.193050193050193,
"grad_norm": 0.7197648286819458,
"learning_rate": 2.00822913088793e-05,
"loss": 0.3354,
"num_input_tokens_seen": 7419056,
"step": 155
},
{
"epoch": 1.2007722007722008,
"grad_norm": 0.7414352297782898,
"learning_rate": 1.975917740828634e-05,
"loss": 0.3303,
"num_input_tokens_seen": 7464800,
"step": 156
},
{
"epoch": 1.2084942084942085,
"grad_norm": 0.716848611831665,
"learning_rate": 1.9436976651092144e-05,
"loss": 0.336,
"num_input_tokens_seen": 7512016,
"step": 157
},
{
"epoch": 1.2162162162162162,
"grad_norm": 0.7576581835746765,
"learning_rate": 1.9115745176478946e-05,
"loss": 0.3835,
"num_input_tokens_seen": 7558192,
"step": 158
},
{
"epoch": 1.223938223938224,
"grad_norm": 0.7724153399467468,
"learning_rate": 1.879553895474441e-05,
"loss": 0.3977,
"num_input_tokens_seen": 7606360,
"step": 159
},
{
"epoch": 1.2316602316602316,
"grad_norm": 0.7829853892326355,
"learning_rate": 1.8476413777549585e-05,
"loss": 0.3973,
"num_input_tokens_seen": 7658256,
"step": 160
},
{
"epoch": 1.2393822393822393,
"grad_norm": 0.73533695936203,
"learning_rate": 1.815842524819793e-05,
"loss": 0.3545,
"num_input_tokens_seen": 7705928,
"step": 161
},
{
"epoch": 1.247104247104247,
"grad_norm": 0.731606662273407,
"learning_rate": 1.784162877194719e-05,
"loss": 0.3374,
"num_input_tokens_seen": 7754904,
"step": 162
},
{
"epoch": 1.2548262548262548,
"grad_norm": 0.7551190257072449,
"learning_rate": 1.7526079546355744e-05,
"loss": 0.3622,
"num_input_tokens_seen": 7801208,
"step": 163
},
{
"epoch": 1.2625482625482625,
"grad_norm": 0.8263510465621948,
"learning_rate": 1.7211832551665214e-05,
"loss": 0.3612,
"num_input_tokens_seen": 7846632,
"step": 164
},
{
"epoch": 1.2702702702702702,
"grad_norm": 0.7753041982650757,
"learning_rate": 1.6898942541220856e-05,
"loss": 0.3578,
"num_input_tokens_seen": 7893448,
"step": 165
},
{
"epoch": 1.2779922779922779,
"grad_norm": 0.7862949967384338,
"learning_rate": 1.6587464031931528e-05,
"loss": 0.3916,
"num_input_tokens_seen": 7941168,
"step": 166
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.7567591667175293,
"learning_rate": 1.6277451294770834e-05,
"loss": 0.3759,
"num_input_tokens_seen": 7994968,
"step": 167
},
{
"epoch": 1.2934362934362935,
"grad_norm": 0.7898448705673218,
"learning_rate": 1.5968958345321178e-05,
"loss": 0.3732,
"num_input_tokens_seen": 8040744,
"step": 168
},
{
"epoch": 1.3011583011583012,
"grad_norm": 0.7488287687301636,
"learning_rate": 1.566203893436221e-05,
"loss": 0.3854,
"num_input_tokens_seen": 8090992,
"step": 169
},
{
"epoch": 1.308880308880309,
"grad_norm": 0.7275019884109497,
"learning_rate": 1.5356746538505513e-05,
"loss": 0.3519,
"num_input_tokens_seen": 8138688,
"step": 170
},
{
"epoch": 1.3166023166023166,
"grad_norm": 0.8107270002365112,
"learning_rate": 1.5053134350876983e-05,
"loss": 0.4125,
"num_input_tokens_seen": 8188200,
"step": 171
},
{
"epoch": 1.3243243243243243,
"grad_norm": 0.748126208782196,
"learning_rate": 1.4751255271848662e-05,
"loss": 0.3839,
"num_input_tokens_seen": 8238184,
"step": 172
},
{
"epoch": 1.332046332046332,
"grad_norm": 0.7602939605712891,
"learning_rate": 1.4451161899821558e-05,
"loss": 0.3998,
"num_input_tokens_seen": 8287400,
"step": 173
},
{
"epoch": 1.3397683397683398,
"grad_norm": 0.7316227555274963,
"learning_rate": 1.4152906522061048e-05,
"loss": 0.363,
"num_input_tokens_seen": 8332240,
"step": 174
},
{
"epoch": 1.3474903474903475,
"grad_norm": 0.6714322566986084,
"learning_rate": 1.3856541105586545e-05,
"loss": 0.3426,
"num_input_tokens_seen": 8383952,
"step": 175
},
{
"epoch": 1.3552123552123552,
"grad_norm": 0.8157584071159363,
"learning_rate": 1.3562117288116924e-05,
"loss": 0.4047,
"num_input_tokens_seen": 8430544,
"step": 176
},
{
"epoch": 1.3629343629343629,
"grad_norm": 0.7479146122932434,
"learning_rate": 1.3269686369073347e-05,
"loss": 0.3402,
"num_input_tokens_seen": 8471896,
"step": 177
},
{
"epoch": 1.3706563706563706,
"grad_norm": 0.7816237807273865,
"learning_rate": 1.2979299300641018e-05,
"loss": 0.4113,
"num_input_tokens_seen": 8517400,
"step": 178
},
{
"epoch": 1.3783783783783785,
"grad_norm": 0.7359681725502014,
"learning_rate": 1.2691006678891446e-05,
"loss": 0.3818,
"num_input_tokens_seen": 8563608,
"step": 179
},
{
"epoch": 1.3861003861003862,
"grad_norm": 0.7472447156906128,
"learning_rate": 1.240485873496677e-05,
"loss": 0.3753,
"num_input_tokens_seen": 8609832,
"step": 180
},
{
"epoch": 1.393822393822394,
"grad_norm": 0.7036008834838867,
"learning_rate": 1.2120905326327598e-05,
"loss": 0.3512,
"num_input_tokens_seen": 8656152,
"step": 181
},
{
"epoch": 1.4015444015444016,
"grad_norm": 0.7634910345077515,
"learning_rate": 1.1839195928066102e-05,
"loss": 0.4048,
"num_input_tokens_seen": 8704416,
"step": 182
},
{
"epoch": 1.4092664092664093,
"grad_norm": 0.7366232872009277,
"learning_rate": 1.1559779624285564e-05,
"loss": 0.3488,
"num_input_tokens_seen": 8748368,
"step": 183
},
{
"epoch": 1.416988416988417,
"grad_norm": 0.6778586506843567,
"learning_rate": 1.128270509954816e-05,
"loss": 0.3416,
"num_input_tokens_seen": 8798392,
"step": 184
},
{
"epoch": 1.4247104247104247,
"grad_norm": 0.727095365524292,
"learning_rate": 1.1008020630392302e-05,
"loss": 0.3747,
"num_input_tokens_seen": 8848240,
"step": 185
},
{
"epoch": 1.4324324324324325,
"grad_norm": 0.7619035243988037,
"learning_rate": 1.0735774076921129e-05,
"loss": 0.3734,
"num_input_tokens_seen": 8896008,
"step": 186
},
{
"epoch": 1.4401544401544402,
"grad_norm": 0.7539617419242859,
"learning_rate": 1.0466012874463507e-05,
"loss": 0.3563,
"num_input_tokens_seen": 8943224,
"step": 187
},
{
"epoch": 1.4478764478764479,
"grad_norm": 0.7609190940856934,
"learning_rate": 1.0198784025309047e-05,
"loss": 0.3821,
"num_input_tokens_seen": 8989312,
"step": 188
},
{
"epoch": 1.4555984555984556,
"grad_norm": 0.7501497864723206,
"learning_rate": 9.934134090518593e-06,
"loss": 0.378,
"num_input_tokens_seen": 9042424,
"step": 189
},
{
"epoch": 1.4633204633204633,
"grad_norm": 0.804295539855957,
"learning_rate": 9.672109181811575e-06,
"loss": 0.3665,
"num_input_tokens_seen": 9086072,
"step": 190
},
{
"epoch": 1.471042471042471,
"grad_norm": 0.7334874868392944,
"learning_rate": 9.412754953531663e-06,
"loss": 0.3523,
"num_input_tokens_seen": 9133240,
"step": 191
},
{
"epoch": 1.4787644787644787,
"grad_norm": 0.7709252238273621,
"learning_rate": 9.156116594692096e-06,
"loss": 0.3338,
"num_input_tokens_seen": 9178152,
"step": 192
},
{
"epoch": 1.4864864864864864,
"grad_norm": 0.7570241093635559,
"learning_rate": 8.902238821102111e-06,
"loss": 0.3877,
"num_input_tokens_seen": 9226432,
"step": 193
},
{
"epoch": 1.494208494208494,
"grad_norm": 0.7098576426506042,
"learning_rate": 8.651165867575797e-06,
"loss": 0.3696,
"num_input_tokens_seen": 9278656,
"step": 194
},
{
"epoch": 1.5019305019305018,
"grad_norm": 0.7327427864074707,
"learning_rate": 8.402941480224797e-06,
"loss": 0.3387,
"num_input_tokens_seen": 9323040,
"step": 195
},
{
"epoch": 1.5096525096525095,
"grad_norm": 0.7151550054550171,
"learning_rate": 8.15760890883607e-06,
"loss": 0.3617,
"num_input_tokens_seen": 9373024,
"step": 196
},
{
"epoch": 1.5173745173745172,
"grad_norm": 0.7485451102256775,
"learning_rate": 7.915210899336284e-06,
"loss": 0.3717,
"num_input_tokens_seen": 9421792,
"step": 197
},
{
"epoch": 1.525096525096525,
"grad_norm": 0.7549355030059814,
"learning_rate": 7.67578968634383e-06,
"loss": 0.3815,
"num_input_tokens_seen": 9470992,
"step": 198
},
{
"epoch": 1.5328185328185329,
"grad_norm": 0.759557843208313,
"learning_rate": 7.439386985810037e-06,
"loss": 0.373,
"num_input_tokens_seen": 9518240,
"step": 199
},
{
"epoch": 1.5405405405405406,
"grad_norm": 0.6981601119041443,
"learning_rate": 7.206043987750729e-06,
"loss": 0.3398,
"num_input_tokens_seen": 9565296,
"step": 200
},
{
"epoch": 1.5482625482625483,
"grad_norm": 0.7525540590286255,
"learning_rate": 6.9758013490693855e-06,
"loss": 0.3703,
"num_input_tokens_seen": 9614328,
"step": 201
},
{
"epoch": 1.555984555984556,
"grad_norm": 0.732926607131958,
"learning_rate": 6.7486991864732505e-06,
"loss": 0.3406,
"num_input_tokens_seen": 9660560,
"step": 202
},
{
"epoch": 1.5637065637065637,
"grad_norm": 0.7453652024269104,
"learning_rate": 6.524777069483526e-06,
"loss": 0.385,
"num_input_tokens_seen": 9711688,
"step": 203
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.7156092524528503,
"learning_rate": 6.3040740135408864e-06,
"loss": 0.3335,
"num_input_tokens_seen": 9758240,
"step": 204
},
{
"epoch": 1.579150579150579,
"grad_norm": 0.723884642124176,
"learning_rate": 6.086628473207606e-06,
"loss": 0.3542,
"num_input_tokens_seen": 9806832,
"step": 205
},
{
"epoch": 1.586872586872587,
"grad_norm": 0.750363290309906,
"learning_rate": 5.872478335467299e-06,
"loss": 0.349,
"num_input_tokens_seen": 9852096,
"step": 206
},
{
"epoch": 1.5945945945945947,
"grad_norm": 0.7412139773368835,
"learning_rate": 5.661660913123673e-06,
"loss": 0.351,
"num_input_tokens_seen": 9899240,
"step": 207
},
{
"epoch": 1.6023166023166024,
"grad_norm": 0.6961778402328491,
"learning_rate": 5.454212938299255e-06,
"loss": 0.3343,
"num_input_tokens_seen": 9947712,
"step": 208
},
{
"epoch": 1.6100386100386102,
"grad_norm": 0.7364585995674133,
"learning_rate": 5.250170556035302e-06,
"loss": 0.3283,
"num_input_tokens_seen": 9991232,
"step": 209
},
{
"epoch": 1.6177606177606179,
"grad_norm": 0.7384213805198669,
"learning_rate": 5.049569317994013e-06,
"loss": 0.3525,
"num_input_tokens_seen": 10038696,
"step": 210
},
{
"epoch": 1.6254826254826256,
"grad_norm": 0.8023526668548584,
"learning_rate": 4.852444176264129e-06,
"loss": 0.4056,
"num_input_tokens_seen": 10090976,
"step": 211
},
{
"epoch": 1.6332046332046333,
"grad_norm": 0.7126312851905823,
"learning_rate": 4.658829477270996e-06,
"loss": 0.3567,
"num_input_tokens_seen": 10140048,
"step": 212
},
{
"epoch": 1.640926640926641,
"grad_norm": 0.7448667883872986,
"learning_rate": 4.46875895579216e-06,
"loss": 0.3507,
"num_input_tokens_seen": 10187872,
"step": 213
},
{
"epoch": 1.6486486486486487,
"grad_norm": 0.7644662261009216,
"learning_rate": 4.282265729079535e-06,
"loss": 0.3818,
"num_input_tokens_seen": 10234368,
"step": 214
},
{
"epoch": 1.6563706563706564,
"grad_norm": 0.8175966739654541,
"learning_rate": 4.099382291089151e-06,
"loss": 0.407,
"num_input_tokens_seen": 10284208,
"step": 215
},
{
"epoch": 1.664092664092664,
"grad_norm": 0.8007215261459351,
"learning_rate": 3.920140506819539e-06,
"loss": 0.4055,
"num_input_tokens_seen": 10331624,
"step": 216
},
{
"epoch": 1.6718146718146718,
"grad_norm": 0.7793760895729065,
"learning_rate": 3.7445716067596503e-06,
"loss": 0.37,
"num_input_tokens_seen": 10377720,
"step": 217
},
{
"epoch": 1.6795366795366795,
"grad_norm": 0.6971163749694824,
"learning_rate": 3.5727061814473854e-06,
"loss": 0.3521,
"num_input_tokens_seen": 10430784,
"step": 218
},
{
"epoch": 1.6872586872586872,
"grad_norm": 0.7408994436264038,
"learning_rate": 3.404574176139591e-06,
"loss": 0.3667,
"num_input_tokens_seen": 10477896,
"step": 219
},
{
"epoch": 1.694980694980695,
"grad_norm": 0.7539640665054321,
"learning_rate": 3.240204885594514e-06,
"loss": 0.3438,
"num_input_tokens_seen": 10524432,
"step": 220
},
{
"epoch": 1.7027027027027026,
"grad_norm": 0.6838665008544922,
"learning_rate": 3.0796269489675344e-06,
"loss": 0.3155,
"num_input_tokens_seen": 10571912,
"step": 221
},
{
"epoch": 1.7104247104247103,
"grad_norm": 0.7157579660415649,
"learning_rate": 2.922868344821236e-06,
"loss": 0.3812,
"num_input_tokens_seen": 10622048,
"step": 222
},
{
"epoch": 1.718146718146718,
"grad_norm": 0.7010034918785095,
"learning_rate": 2.769956386250472e-06,
"loss": 0.351,
"num_input_tokens_seen": 10673328,
"step": 223
},
{
"epoch": 1.7258687258687258,
"grad_norm": 0.7771580219268799,
"learning_rate": 2.6209177161234445e-06,
"loss": 0.3879,
"num_input_tokens_seen": 10721704,
"step": 224
},
{
"epoch": 1.7335907335907335,
"grad_norm": 0.7555801868438721,
"learning_rate": 2.475778302439524e-06,
"loss": 0.3903,
"num_input_tokens_seen": 10769240,
"step": 225
},
{
"epoch": 1.7413127413127412,
"grad_norm": 0.7153806686401367,
"learning_rate": 2.3345634338046875e-06,
"loss": 0.3437,
"num_input_tokens_seen": 10814464,
"step": 226
},
{
"epoch": 1.7490347490347489,
"grad_norm": 0.7552188038825989,
"learning_rate": 2.1972977150253064e-06,
"loss": 0.3612,
"num_input_tokens_seen": 10859000,
"step": 227
},
{
"epoch": 1.7567567567567568,
"grad_norm": 0.6911195516586304,
"learning_rate": 2.064005062821095e-06,
"loss": 0.32,
"num_input_tokens_seen": 10906008,
"step": 228
},
{
"epoch": 1.7644787644787645,
"grad_norm": 0.7437323927879333,
"learning_rate": 1.9347087016579278e-06,
"loss": 0.3504,
"num_input_tokens_seen": 10952840,
"step": 229
},
{
"epoch": 1.7722007722007722,
"grad_norm": 0.8080976605415344,
"learning_rate": 1.8094311597013052e-06,
"loss": 0.3971,
"num_input_tokens_seen": 11001384,
"step": 230
},
{
"epoch": 1.77992277992278,
"grad_norm": 0.7577563524246216,
"learning_rate": 1.6881942648911076e-06,
"loss": 0.3904,
"num_input_tokens_seen": 11049440,
"step": 231
},
{
"epoch": 1.7876447876447876,
"grad_norm": 0.732723593711853,
"learning_rate": 1.5710191411383662e-06,
"loss": 0.3325,
"num_input_tokens_seen": 11093928,
"step": 232
},
{
"epoch": 1.7953667953667953,
"grad_norm": 0.7599166035652161,
"learning_rate": 1.4579262046447107e-06,
"loss": 0.393,
"num_input_tokens_seen": 11140832,
"step": 233
},
{
"epoch": 1.803088803088803,
"grad_norm": 0.8044569492340088,
"learning_rate": 1.3489351603451329e-06,
"loss": 0.4306,
"num_input_tokens_seen": 11191600,
"step": 234
},
{
"epoch": 1.810810810810811,
"grad_norm": 0.795045793056488,
"learning_rate": 1.2440649984746256e-06,
"loss": 0.3767,
"num_input_tokens_seen": 11235984,
"step": 235
},
{
"epoch": 1.8185328185328187,
"grad_norm": 0.7915075421333313,
"learning_rate": 1.1433339912594266e-06,
"loss": 0.3552,
"num_input_tokens_seen": 11276416,
"step": 236
},
{
"epoch": 1.8262548262548264,
"grad_norm": 0.7200133204460144,
"learning_rate": 1.046759689733301e-06,
"loss": 0.3686,
"num_input_tokens_seen": 11328536,
"step": 237
},
{
"epoch": 1.833976833976834,
"grad_norm": 0.7975424528121948,
"learning_rate": 9.54358920679524e-07,
"loss": 0.3981,
"num_input_tokens_seen": 11374664,
"step": 238
},
{
"epoch": 1.8416988416988418,
"grad_norm": 0.7360520958900452,
"learning_rate": 8.661477836990339e-07,
"loss": 0.3678,
"num_input_tokens_seen": 11424256,
"step": 239
},
{
"epoch": 1.8494208494208495,
"grad_norm": 0.713374674320221,
"learning_rate": 7.821416484052879e-07,
"loss": 0.3443,
"num_input_tokens_seen": 11469272,
"step": 240
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.7081705331802368,
"learning_rate": 7.02355151746309e-07,
"loss": 0.3333,
"num_input_tokens_seen": 11514592,
"step": 241
},
{
"epoch": 1.864864864864865,
"grad_norm": 0.7440131902694702,
"learning_rate": 6.268021954544096e-07,
"loss": 0.3661,
"num_input_tokens_seen": 11559064,
"step": 242
},
{
"epoch": 1.8725868725868726,
"grad_norm": 0.6954824328422546,
"learning_rate": 5.554959436239821e-07,
"loss": 0.3499,
"num_input_tokens_seen": 11609720,
"step": 243
},
{
"epoch": 1.8803088803088803,
"grad_norm": 0.7557532787322998,
"learning_rate": 4.884488204178333e-07,
"loss": 0.3657,
"num_input_tokens_seen": 11657256,
"step": 244
},
{
"epoch": 1.888030888030888,
"grad_norm": 0.7957518100738525,
"learning_rate": 4.256725079024554e-07,
"loss": 0.3827,
"num_input_tokens_seen": 11704528,
"step": 245
},
{
"epoch": 1.8957528957528957,
"grad_norm": 0.7069993019104004,
"learning_rate": 3.6717794401256445e-07,
"loss": 0.34,
"num_input_tokens_seen": 11752560,
"step": 246
},
{
"epoch": 1.9034749034749034,
"grad_norm": 0.7414040565490723,
"learning_rate": 3.129753206453201e-07,
"loss": 0.3512,
"num_input_tokens_seen": 11798928,
"step": 247
},
{
"epoch": 1.9111969111969112,
"grad_norm": 0.6941518187522888,
"learning_rate": 2.630740818845123e-07,
"loss": 0.3566,
"num_input_tokens_seen": 11848456,
"step": 248
},
{
"epoch": 1.9189189189189189,
"grad_norm": 0.6867352724075317,
"learning_rate": 2.174829223550806e-07,
"loss": 0.3551,
"num_input_tokens_seen": 11899192,
"step": 249
},
{
"epoch": 1.9266409266409266,
"grad_norm": 0.6801990270614624,
"learning_rate": 1.762097857081646e-07,
"loss": 0.3205,
"num_input_tokens_seen": 11947904,
"step": 250
},
{
"epoch": 1.9343629343629343,
"grad_norm": 0.682864248752594,
"learning_rate": 1.3926186323703905e-07,
"loss": 0.3455,
"num_input_tokens_seen": 11997104,
"step": 251
},
{
"epoch": 1.942084942084942,
"grad_norm": 0.7804450988769531,
"learning_rate": 1.0664559262413831e-07,
"loss": 0.4022,
"num_input_tokens_seen": 12045824,
"step": 252
},
{
"epoch": 1.9498069498069497,
"grad_norm": 0.7370994687080383,
"learning_rate": 7.836665681935085e-08,
"loss": 0.3797,
"num_input_tokens_seen": 12096360,
"step": 253
},
{
"epoch": 1.9575289575289574,
"grad_norm": 0.7060906291007996,
"learning_rate": 5.44299830498668e-08,
"loss": 0.3234,
"num_input_tokens_seen": 12141952,
"step": 254
},
{
"epoch": 1.965250965250965,
"grad_norm": 0.7281149625778198,
"learning_rate": 3.4839741961659176e-08,
"loss": 0.3741,
"num_input_tokens_seen": 12193160,
"step": 255
},
{
"epoch": 1.972972972972973,
"grad_norm": 0.7566375136375427,
"learning_rate": 1.9599346892809622e-08,
"loss": 0.3475,
"num_input_tokens_seen": 12237616,
"step": 256
},
{
"epoch": 1.9806949806949807,
"grad_norm": 0.7222142815589905,
"learning_rate": 8.711453278778536e-09,
"loss": 0.3699,
"num_input_tokens_seen": 12283352,
"step": 257
},
{
"epoch": 1.9884169884169884,
"grad_norm": 0.7003583908081055,
"learning_rate": 2.177958189733542e-09,
"loss": 0.3471,
"num_input_tokens_seen": 12333744,
"step": 258
},
{
"epoch": 1.9884169884169884,
"num_input_tokens_seen": 12333744,
"step": 258,
"total_flos": 2.0534009572779622e+17,
"train_loss": 0.535006606301596,
"train_runtime": 453.6685,
"train_samples_per_second": 9.13,
"train_steps_per_second": 0.569
}
],
"logging_steps": 1.0,
"max_steps": 258,
"num_input_tokens_seen": 12333744,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.0534009572779622e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
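The JSON above is a Hugging Face Transformers Trainer state (`trainer_state.json`): `log_history` holds one entry per logged step plus a final summary entry. A minimal sketch of inspecting it, assuming the file is saved locally under that standard name:

```python
import json

# Minimal sketch: load the Trainer state and summarize the logged training loss.
# Assumes the JSON above is saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry a "loss" field; the final summary entry does not.
step_logs = [entry for entry in state["log_history"] if "loss" in entry]

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.4f}")
print(f"first logged loss: {step_logs[0]['loss']:.4f} (step {step_logs[0]['step']})")
print(f"last logged loss:  {step_logs[-1]['loss']:.4f} (step {step_logs[-1]['step']})")
print(f"mean train loss reported at end: {state['log_history'][-1]['train_loss']:.4f}")
```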