{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4433473649695545,
"eval_steps": 500,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0036945613747462877,
"grad_norm": 2.2292165756225586,
"learning_rate": 5.911330049261084e-06,
"loss": 0.9395,
"step": 25
},
{
"epoch": 0.007389122749492575,
"grad_norm": 0.8381065130233765,
"learning_rate": 1.206896551724138e-05,
"loss": 0.89,
"step": 50
},
{
"epoch": 0.011083684124238862,
"grad_norm": 0.9238471984863281,
"learning_rate": 1.8226600985221676e-05,
"loss": 0.8384,
"step": 75
},
{
"epoch": 0.01477824549898515,
"grad_norm": 0.6837311387062073,
"learning_rate": 2.438423645320197e-05,
"loss": 0.784,
"step": 100
},
{
"epoch": 0.01847280687373144,
"grad_norm": 0.7093706727027893,
"learning_rate": 3.0541871921182266e-05,
"loss": 0.7675,
"step": 125
},
{
"epoch": 0.022167368248477724,
"grad_norm": 0.6208077073097229,
"learning_rate": 3.669950738916256e-05,
"loss": 0.7466,
"step": 150
},
{
"epoch": 0.025861929623224013,
"grad_norm": 0.8929094076156616,
"learning_rate": 4.2857142857142856e-05,
"loss": 0.7386,
"step": 175
},
{
"epoch": 0.0295564909979703,
"grad_norm": 0.7828120589256287,
"learning_rate": 4.901477832512316e-05,
"loss": 0.7173,
"step": 200
},
{
"epoch": 0.03325105237271659,
"grad_norm": 0.7134449481964111,
"learning_rate": 5.517241379310345e-05,
"loss": 0.7108,
"step": 225
},
{
"epoch": 0.03694561374746288,
"grad_norm": 0.7464851140975952,
"learning_rate": 6.133004926108375e-05,
"loss": 0.7332,
"step": 250
},
{
"epoch": 0.04064017512220916,
"grad_norm": 0.677793025970459,
"learning_rate": 6.748768472906404e-05,
"loss": 0.7194,
"step": 275
},
{
"epoch": 0.04433473649695545,
"grad_norm": 0.7936354875564575,
"learning_rate": 7.364532019704434e-05,
"loss": 0.7253,
"step": 300
},
{
"epoch": 0.04802929787170174,
"grad_norm": 0.6711100935935974,
"learning_rate": 7.980295566502463e-05,
"loss": 0.7018,
"step": 325
},
{
"epoch": 0.051723859246448026,
"grad_norm": 0.5816489458084106,
"learning_rate": 8.596059113300493e-05,
"loss": 0.7298,
"step": 350
},
{
"epoch": 0.055418420621194314,
"grad_norm": 0.6680681705474854,
"learning_rate": 9.211822660098522e-05,
"loss": 0.7149,
"step": 375
},
{
"epoch": 0.0591129819959406,
"grad_norm": 0.5643934607505798,
"learning_rate": 9.827586206896552e-05,
"loss": 0.763,
"step": 400
},
{
"epoch": 0.06280754337068689,
"grad_norm": 0.5739309191703796,
"learning_rate": 9.986286759104069e-05,
"loss": 0.7345,
"step": 425
},
{
"epoch": 0.06650210474543318,
"grad_norm": 0.5929909944534302,
"learning_rate": 9.967240591193052e-05,
"loss": 0.7364,
"step": 450
},
{
"epoch": 0.07019666612017947,
"grad_norm": 0.609235405921936,
"learning_rate": 9.948194423282036e-05,
"loss": 0.7294,
"step": 475
},
{
"epoch": 0.07389122749492576,
"grad_norm": 0.4643324613571167,
"learning_rate": 9.92914825537102e-05,
"loss": 0.7344,
"step": 500
},
{
"epoch": 0.07758578886967203,
"grad_norm": 0.5267598032951355,
"learning_rate": 9.910102087460003e-05,
"loss": 0.7249,
"step": 525
},
{
"epoch": 0.08128035024441832,
"grad_norm": 0.47951069474220276,
"learning_rate": 9.891055919548987e-05,
"loss": 0.7256,
"step": 550
},
{
"epoch": 0.08497491161916461,
"grad_norm": 0.4505012333393097,
"learning_rate": 9.87200975163797e-05,
"loss": 0.7359,
"step": 575
},
{
"epoch": 0.0886694729939109,
"grad_norm": 0.5320091247558594,
"learning_rate": 9.852963583726955e-05,
"loss": 0.6856,
"step": 600
},
{
"epoch": 0.09236403436865719,
"grad_norm": 0.5583036541938782,
"learning_rate": 9.833917415815939e-05,
"loss": 0.7235,
"step": 625
},
{
"epoch": 0.09605859574340347,
"grad_norm": 0.5139252543449402,
"learning_rate": 9.814871247904922e-05,
"loss": 0.7272,
"step": 650
},
{
"epoch": 0.09975315711814976,
"grad_norm": 0.4989326000213623,
"learning_rate": 9.795825079993906e-05,
"loss": 0.6952,
"step": 675
},
{
"epoch": 0.10344771849289605,
"grad_norm": 0.47355732321739197,
"learning_rate": 9.776778912082889e-05,
"loss": 0.7266,
"step": 700
},
{
"epoch": 0.10714227986764234,
"grad_norm": 0.3588508367538452,
"learning_rate": 9.757732744171874e-05,
"loss": 0.7406,
"step": 725
},
{
"epoch": 0.11083684124238863,
"grad_norm": 0.4120556712150574,
"learning_rate": 9.738686576260857e-05,
"loss": 0.7443,
"step": 750
},
{
"epoch": 0.11453140261713492,
"grad_norm": 0.5160555839538574,
"learning_rate": 9.71964040834984e-05,
"loss": 0.7134,
"step": 775
},
{
"epoch": 0.1182259639918812,
"grad_norm": 0.5423145890235901,
"learning_rate": 9.700594240438823e-05,
"loss": 0.7289,
"step": 800
},
{
"epoch": 0.1219205253666275,
"grad_norm": 0.5352346301078796,
"learning_rate": 9.681548072527808e-05,
"loss": 0.7144,
"step": 825
},
{
"epoch": 0.12561508674137378,
"grad_norm": 0.47908860445022583,
"learning_rate": 9.662501904616791e-05,
"loss": 0.7175,
"step": 850
},
{
"epoch": 0.12930964811612006,
"grad_norm": 0.47986069321632385,
"learning_rate": 9.643455736705776e-05,
"loss": 0.6983,
"step": 875
},
{
"epoch": 0.13300420949086636,
"grad_norm": 0.6903620958328247,
"learning_rate": 9.624409568794759e-05,
"loss": 0.7086,
"step": 900
},
{
"epoch": 0.13669877086561263,
"grad_norm": 0.44413208961486816,
"learning_rate": 9.605363400883742e-05,
"loss": 0.7306,
"step": 925
},
{
"epoch": 0.14039333224035894,
"grad_norm": 0.4634678065776825,
"learning_rate": 9.586317232972727e-05,
"loss": 0.7061,
"step": 950
},
{
"epoch": 0.1440878936151052,
"grad_norm": 0.5110129714012146,
"learning_rate": 9.56727106506171e-05,
"loss": 0.7406,
"step": 975
},
{
"epoch": 0.1477824549898515,
"grad_norm": 0.5460866093635559,
"learning_rate": 9.548224897150694e-05,
"loss": 0.721,
"step": 1000
},
{
"epoch": 0.1514770163645978,
"grad_norm": 0.5179885029792786,
"learning_rate": 9.529178729239677e-05,
"loss": 0.7068,
"step": 1025
},
{
"epoch": 0.15517157773934406,
"grad_norm": 0.40280836820602417,
"learning_rate": 9.51013256132866e-05,
"loss": 0.7009,
"step": 1050
},
{
"epoch": 0.15886613911409037,
"grad_norm": 1.2706756591796875,
"learning_rate": 9.491086393417645e-05,
"loss": 0.7125,
"step": 1075
},
{
"epoch": 0.16256070048883664,
"grad_norm": 0.4963163435459137,
"learning_rate": 9.47204022550663e-05,
"loss": 0.7149,
"step": 1100
},
{
"epoch": 0.16625526186358294,
"grad_norm": 0.5147728323936462,
"learning_rate": 9.452994057595613e-05,
"loss": 0.7245,
"step": 1125
},
{
"epoch": 0.16994982323832922,
"grad_norm": 0.5933899879455566,
"learning_rate": 9.433947889684596e-05,
"loss": 0.7261,
"step": 1150
},
{
"epoch": 0.17364438461307552,
"grad_norm": 0.4750466048717499,
"learning_rate": 9.414901721773579e-05,
"loss": 0.7206,
"step": 1175
},
{
"epoch": 0.1773389459878218,
"grad_norm": 0.46546968817710876,
"learning_rate": 9.395855553862564e-05,
"loss": 0.7121,
"step": 1200
},
{
"epoch": 0.1810335073625681,
"grad_norm": 0.6512172818183899,
"learning_rate": 9.376809385951547e-05,
"loss": 0.7212,
"step": 1225
},
{
"epoch": 0.18472806873731437,
"grad_norm": 0.34932607412338257,
"learning_rate": 9.35776321804053e-05,
"loss": 0.704,
"step": 1250
},
{
"epoch": 0.18842263011206067,
"grad_norm": 0.4648846983909607,
"learning_rate": 9.338717050129514e-05,
"loss": 0.7419,
"step": 1275
},
{
"epoch": 0.19211719148680695,
"grad_norm": 0.4566064774990082,
"learning_rate": 9.319670882218498e-05,
"loss": 0.7318,
"step": 1300
},
{
"epoch": 0.19581175286155325,
"grad_norm": 0.5357668399810791,
"learning_rate": 9.300624714307481e-05,
"loss": 0.6973,
"step": 1325
},
{
"epoch": 0.19950631423629953,
"grad_norm": 0.4423241913318634,
"learning_rate": 9.281578546396466e-05,
"loss": 0.7298,
"step": 1350
},
{
"epoch": 0.20320087561104583,
"grad_norm": 0.4530033767223358,
"learning_rate": 9.26253237848545e-05,
"loss": 0.7161,
"step": 1375
},
{
"epoch": 0.2068954369857921,
"grad_norm": 0.4678841233253479,
"learning_rate": 9.243486210574433e-05,
"loss": 0.6972,
"step": 1400
},
{
"epoch": 0.21058999836053838,
"grad_norm": 0.6039907336235046,
"learning_rate": 9.224440042663417e-05,
"loss": 0.7165,
"step": 1425
},
{
"epoch": 0.21428455973528468,
"grad_norm": 0.4463271498680115,
"learning_rate": 9.2053938747524e-05,
"loss": 0.6863,
"step": 1450
},
{
"epoch": 0.21797912111003095,
"grad_norm": 0.5739301443099976,
"learning_rate": 9.186347706841384e-05,
"loss": 0.6907,
"step": 1475
},
{
"epoch": 0.22167368248477726,
"grad_norm": 0.4577805697917938,
"learning_rate": 9.167301538930367e-05,
"loss": 0.7114,
"step": 1500
},
{
"epoch": 0.22536824385952353,
"grad_norm": 0.4522150158882141,
"learning_rate": 9.14825537101935e-05,
"loss": 0.6877,
"step": 1525
},
{
"epoch": 0.22906280523426983,
"grad_norm": 0.49612903594970703,
"learning_rate": 9.129209203108335e-05,
"loss": 0.7112,
"step": 1550
},
{
"epoch": 0.2327573666090161,
"grad_norm": 0.4710284471511841,
"learning_rate": 9.11016303519732e-05,
"loss": 0.7062,
"step": 1575
},
{
"epoch": 0.2364519279837624,
"grad_norm": 0.5009223818778992,
"learning_rate": 9.091116867286303e-05,
"loss": 0.7275,
"step": 1600
},
{
"epoch": 0.24014648935850869,
"grad_norm": 0.5547946691513062,
"learning_rate": 9.072070699375286e-05,
"loss": 0.6993,
"step": 1625
},
{
"epoch": 0.243841050733255,
"grad_norm": 0.4580361843109131,
"learning_rate": 9.05302453146427e-05,
"loss": 0.7106,
"step": 1650
},
{
"epoch": 0.24753561210800126,
"grad_norm": 0.4767173230648041,
"learning_rate": 9.033978363553254e-05,
"loss": 0.7103,
"step": 1675
},
{
"epoch": 0.25123017348274757,
"grad_norm": 0.502202570438385,
"learning_rate": 9.014932195642237e-05,
"loss": 0.6921,
"step": 1700
},
{
"epoch": 0.25492473485749384,
"grad_norm": 0.5283953547477722,
"learning_rate": 8.99588602773122e-05,
"loss": 0.7077,
"step": 1725
},
{
"epoch": 0.2586192962322401,
"grad_norm": 0.4994209408760071,
"learning_rate": 8.976839859820204e-05,
"loss": 0.7,
"step": 1750
},
{
"epoch": 0.2623138576069864,
"grad_norm": 0.48279210925102234,
"learning_rate": 8.957793691909188e-05,
"loss": 0.7114,
"step": 1775
},
{
"epoch": 0.2660084189817327,
"grad_norm": 0.6055914759635925,
"learning_rate": 8.938747523998172e-05,
"loss": 0.7081,
"step": 1800
},
{
"epoch": 0.269702980356479,
"grad_norm": 0.489519327878952,
"learning_rate": 8.919701356087156e-05,
"loss": 0.6946,
"step": 1825
},
{
"epoch": 0.27339754173122527,
"grad_norm": 0.5379961133003235,
"learning_rate": 8.90065518817614e-05,
"loss": 0.6996,
"step": 1850
},
{
"epoch": 0.27709210310597154,
"grad_norm": 0.47824332118034363,
"learning_rate": 8.881609020265123e-05,
"loss": 0.6881,
"step": 1875
},
{
"epoch": 0.2807866644807179,
"grad_norm": 1551.0594482421875,
"learning_rate": 8.862562852354107e-05,
"loss": 0.6998,
"step": 1900
},
{
"epoch": 0.28448122585546415,
"grad_norm": 0.4107681214809418,
"learning_rate": 8.84351668444309e-05,
"loss": 0.7088,
"step": 1925
},
{
"epoch": 0.2881757872302104,
"grad_norm": 0.4558309316635132,
"learning_rate": 8.824470516532074e-05,
"loss": 0.712,
"step": 1950
},
{
"epoch": 0.2918703486049567,
"grad_norm": 0.539107620716095,
"learning_rate": 8.805424348621057e-05,
"loss": 0.7159,
"step": 1975
},
{
"epoch": 0.295564909979703,
"grad_norm": 0.5768142938613892,
"learning_rate": 8.786378180710042e-05,
"loss": 0.7072,
"step": 2000
},
{
"epoch": 0.2992594713544493,
"grad_norm": 0.5575465559959412,
"learning_rate": 8.767332012799025e-05,
"loss": 0.7118,
"step": 2025
},
{
"epoch": 0.3029540327291956,
"grad_norm": 0.5190144181251526,
"learning_rate": 8.748285844888008e-05,
"loss": 0.7099,
"step": 2050
},
{
"epoch": 0.30664859410394185,
"grad_norm": 0.4934520125389099,
"learning_rate": 8.729239676976993e-05,
"loss": 0.692,
"step": 2075
},
{
"epoch": 0.3103431554786881,
"grad_norm": 0.42613571882247925,
"learning_rate": 8.710193509065976e-05,
"loss": 0.7277,
"step": 2100
},
{
"epoch": 0.31403771685343446,
"grad_norm": 0.5124602317810059,
"learning_rate": 8.691147341154961e-05,
"loss": 0.6801,
"step": 2125
},
{
"epoch": 0.31773227822818073,
"grad_norm": 0.5284898281097412,
"learning_rate": 8.672101173243944e-05,
"loss": 0.7103,
"step": 2150
},
{
"epoch": 0.321426839602927,
"grad_norm": 0.43099457025527954,
"learning_rate": 8.653055005332927e-05,
"loss": 0.7023,
"step": 2175
},
{
"epoch": 0.3251214009776733,
"grad_norm": 0.5190865993499756,
"learning_rate": 8.63400883742191e-05,
"loss": 0.7144,
"step": 2200
},
{
"epoch": 0.3288159623524196,
"grad_norm": 0.4722968637943268,
"learning_rate": 8.614962669510895e-05,
"loss": 0.7351,
"step": 2225
},
{
"epoch": 0.3325105237271659,
"grad_norm": 0.6091466546058655,
"learning_rate": 8.595916501599878e-05,
"loss": 0.7062,
"step": 2250
},
{
"epoch": 0.33620508510191216,
"grad_norm": 0.6135897040367126,
"learning_rate": 8.576870333688862e-05,
"loss": 0.7117,
"step": 2275
},
{
"epoch": 0.33989964647665843,
"grad_norm": 0.5224157571792603,
"learning_rate": 8.557824165777846e-05,
"loss": 0.7068,
"step": 2300
},
{
"epoch": 0.34359420785140476,
"grad_norm": 0.4863536059856415,
"learning_rate": 8.53877799786683e-05,
"loss": 0.6952,
"step": 2325
},
{
"epoch": 0.34728876922615104,
"grad_norm": 0.4728885889053345,
"learning_rate": 8.519731829955814e-05,
"loss": 0.7289,
"step": 2350
},
{
"epoch": 0.3509833306008973,
"grad_norm": 0.5152695775032043,
"learning_rate": 8.500685662044798e-05,
"loss": 0.6986,
"step": 2375
},
{
"epoch": 0.3546778919756436,
"grad_norm": 0.4407690465450287,
"learning_rate": 8.481639494133781e-05,
"loss": 0.6983,
"step": 2400
},
{
"epoch": 0.3583724533503899,
"grad_norm": 0.4756406545639038,
"learning_rate": 8.462593326222764e-05,
"loss": 0.6626,
"step": 2425
},
{
"epoch": 0.3620670147251362,
"grad_norm": 0.48595255613327026,
"learning_rate": 8.443547158311747e-05,
"loss": 0.6892,
"step": 2450
},
{
"epoch": 0.36576157609988247,
"grad_norm": 0.47830772399902344,
"learning_rate": 8.424500990400732e-05,
"loss": 0.7162,
"step": 2475
},
{
"epoch": 0.36945613747462874,
"grad_norm": 0.43090149760246277,
"learning_rate": 8.405454822489715e-05,
"loss": 0.6913,
"step": 2500
},
{
"epoch": 0.373150698849375,
"grad_norm": 0.5338718295097351,
"learning_rate": 8.386408654578698e-05,
"loss": 0.7159,
"step": 2525
},
{
"epoch": 0.37684526022412135,
"grad_norm": 0.4907350242137909,
"learning_rate": 8.367362486667683e-05,
"loss": 0.7022,
"step": 2550
},
{
"epoch": 0.3805398215988676,
"grad_norm": 0.44093430042266846,
"learning_rate": 8.348316318756668e-05,
"loss": 0.7124,
"step": 2575
},
{
"epoch": 0.3842343829736139,
"grad_norm": 0.5388796925544739,
"learning_rate": 8.329270150845651e-05,
"loss": 0.7304,
"step": 2600
},
{
"epoch": 0.38792894434836017,
"grad_norm": 0.4456349313259125,
"learning_rate": 8.310223982934634e-05,
"loss": 0.6964,
"step": 2625
},
{
"epoch": 0.3916235057231065,
"grad_norm": 0.4602237343788147,
"learning_rate": 8.291177815023617e-05,
"loss": 0.6707,
"step": 2650
},
{
"epoch": 0.3953180670978528,
"grad_norm": 0.4726378917694092,
"learning_rate": 8.272131647112601e-05,
"loss": 0.694,
"step": 2675
},
{
"epoch": 0.39901262847259905,
"grad_norm": 0.500451922416687,
"learning_rate": 8.253085479201585e-05,
"loss": 0.6812,
"step": 2700
},
{
"epoch": 0.4027071898473453,
"grad_norm": 0.4073813259601593,
"learning_rate": 8.234039311290569e-05,
"loss": 0.7028,
"step": 2725
},
{
"epoch": 0.40640175122209166,
"grad_norm": 0.43644702434539795,
"learning_rate": 8.214993143379552e-05,
"loss": 0.6936,
"step": 2750
},
{
"epoch": 0.41009631259683793,
"grad_norm": 0.5256789922714233,
"learning_rate": 8.195946975468535e-05,
"loss": 0.7035,
"step": 2775
},
{
"epoch": 0.4137908739715842,
"grad_norm": 0.48385316133499146,
"learning_rate": 8.17690080755752e-05,
"loss": 0.6736,
"step": 2800
},
{
"epoch": 0.4174854353463305,
"grad_norm": 0.49825233221054077,
"learning_rate": 8.157854639646504e-05,
"loss": 0.7019,
"step": 2825
},
{
"epoch": 0.42117999672107675,
"grad_norm": 0.5086994171142578,
"learning_rate": 8.138808471735488e-05,
"loss": 0.6918,
"step": 2850
},
{
"epoch": 0.4248745580958231,
"grad_norm": 0.5430259108543396,
"learning_rate": 8.119762303824471e-05,
"loss": 0.7175,
"step": 2875
},
{
"epoch": 0.42856911947056936,
"grad_norm": 0.5889118313789368,
"learning_rate": 8.100716135913454e-05,
"loss": 0.6967,
"step": 2900
},
{
"epoch": 0.43226368084531563,
"grad_norm": 0.5345672369003296,
"learning_rate": 8.081669968002439e-05,
"loss": 0.6926,
"step": 2925
},
{
"epoch": 0.4359582422200619,
"grad_norm": 0.511101245880127,
"learning_rate": 8.062623800091422e-05,
"loss": 0.7248,
"step": 2950
},
{
"epoch": 0.43965280359480824,
"grad_norm": 0.511239767074585,
"learning_rate": 8.043577632180405e-05,
"loss": 0.7319,
"step": 2975
},
{
"epoch": 0.4433473649695545,
"grad_norm": 0.5121573805809021,
"learning_rate": 8.024531464269389e-05,
"loss": 0.7023,
"step": 3000
}
],
"logging_steps": 25,
"max_steps": 13532,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1989742094773969e+19,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}