{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8850340738118417,
"eval_steps": 500,
"global_step": 10000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008850340738118417,
"grad_norm": 1.517469048500061,
"learning_rate": 1.9919999999999997e-06,
"loss": 1.519,
"step": 100
},
{
"epoch": 0.017700681476236834,
"grad_norm": 1.6662976741790771,
"learning_rate": 1.984e-06,
"loss": 1.4776,
"step": 200
},
{
"epoch": 0.026551022214355253,
"grad_norm": 0.8021761775016785,
"learning_rate": 1.976e-06,
"loss": 1.4026,
"step": 300
},
{
"epoch": 0.03540136295247367,
"grad_norm": 0.8772447109222412,
"learning_rate": 1.968e-06,
"loss": 1.3306,
"step": 400
},
{
"epoch": 0.04425170369059209,
"grad_norm": 0.4179963767528534,
"learning_rate": 1.96e-06,
"loss": 1.286,
"step": 500
},
{
"epoch": 0.053102044428710506,
"grad_norm": 0.4829399287700653,
"learning_rate": 1.9519999999999997e-06,
"loss": 1.2756,
"step": 600
},
{
"epoch": 0.061952385166828924,
"grad_norm": 0.5651261806488037,
"learning_rate": 1.944e-06,
"loss": 1.2721,
"step": 700
},
{
"epoch": 0.07080272590494734,
"grad_norm": 0.5160872936248779,
"learning_rate": 1.9359999999999998e-06,
"loss": 1.2579,
"step": 800
},
{
"epoch": 0.07965306664306576,
"grad_norm": 0.7501122355461121,
"learning_rate": 1.928e-06,
"loss": 1.2853,
"step": 900
},
{
"epoch": 0.08850340738118417,
"grad_norm": 0.591393232345581,
"learning_rate": 1.92e-06,
"loss": 1.2472,
"step": 1000
},
{
"epoch": 0.0973537481193026,
"grad_norm": 0.5573005080223083,
"learning_rate": 1.9119999999999997e-06,
"loss": 1.2348,
"step": 1100
},
{
"epoch": 0.10620408885742101,
"grad_norm": 0.607552707195282,
"learning_rate": 1.904e-06,
"loss": 1.2679,
"step": 1200
},
{
"epoch": 0.11505442959553942,
"grad_norm": 0.6899151802062988,
"learning_rate": 1.8959999999999997e-06,
"loss": 1.2513,
"step": 1300
},
{
"epoch": 0.12390477033365785,
"grad_norm": 0.5849478244781494,
"learning_rate": 1.8879999999999998e-06,
"loss": 1.226,
"step": 1400
},
{
"epoch": 0.13275511107177626,
"grad_norm": 0.6027043461799622,
"learning_rate": 1.8799999999999998e-06,
"loss": 1.2163,
"step": 1500
},
{
"epoch": 0.14160545180989467,
"grad_norm": 0.6785966157913208,
"learning_rate": 1.872e-06,
"loss": 1.2076,
"step": 1600
},
{
"epoch": 0.1504557925480131,
"grad_norm": 0.5975379347801208,
"learning_rate": 1.864e-06,
"loss": 1.2146,
"step": 1700
},
{
"epoch": 0.15930613328613152,
"grad_norm": 0.685453474521637,
"learning_rate": 1.856e-06,
"loss": 1.198,
"step": 1800
},
{
"epoch": 0.16815647402424994,
"grad_norm": 1.1250847578048706,
"learning_rate": 1.848e-06,
"loss": 1.218,
"step": 1900
},
{
"epoch": 0.17700681476236835,
"grad_norm": 0.6129019260406494,
"learning_rate": 1.84e-06,
"loss": 1.1894,
"step": 2000
},
{
"epoch": 0.18585715550048676,
"grad_norm": 0.7388463616371155,
"learning_rate": 1.832e-06,
"loss": 1.1962,
"step": 2100
},
{
"epoch": 0.1947074962386052,
"grad_norm": 0.5909475684165955,
"learning_rate": 1.824e-06,
"loss": 1.241,
"step": 2200
},
{
"epoch": 0.2035578369767236,
"grad_norm": 0.5264613032341003,
"learning_rate": 1.816e-06,
"loss": 1.1915,
"step": 2300
},
{
"epoch": 0.21240817771484202,
"grad_norm": 0.6419306397438049,
"learning_rate": 1.8079999999999999e-06,
"loss": 1.1938,
"step": 2400
},
{
"epoch": 0.22125851845296043,
"grad_norm": 0.6240788698196411,
"learning_rate": 1.8e-06,
"loss": 1.214,
"step": 2500
},
{
"epoch": 0.23010885919107885,
"grad_norm": 0.7954173684120178,
"learning_rate": 1.792e-06,
"loss": 1.195,
"step": 2600
},
{
"epoch": 0.23895919992919729,
"grad_norm": 0.7451553344726562,
"learning_rate": 1.784e-06,
"loss": 1.1793,
"step": 2700
},
{
"epoch": 0.2478095406673157,
"grad_norm": 0.8471085429191589,
"learning_rate": 1.776e-06,
"loss": 1.1895,
"step": 2800
},
{
"epoch": 0.2566598814054341,
"grad_norm": 1.0085513591766357,
"learning_rate": 1.7679999999999998e-06,
"loss": 1.2087,
"step": 2900
},
{
"epoch": 0.2655102221435525,
"grad_norm": 0.7709102630615234,
"learning_rate": 1.7599999999999999e-06,
"loss": 1.1959,
"step": 3000
},
{
"epoch": 0.27436056288167093,
"grad_norm": 0.7612162232398987,
"learning_rate": 1.752e-06,
"loss": 1.1803,
"step": 3100
},
{
"epoch": 0.28321090361978934,
"grad_norm": 1.0669310092926025,
"learning_rate": 1.744e-06,
"loss": 1.1548,
"step": 3200
},
{
"epoch": 0.29206124435790776,
"grad_norm": 0.7489383220672607,
"learning_rate": 1.736e-06,
"loss": 1.1654,
"step": 3300
},
{
"epoch": 0.3009115850960262,
"grad_norm": 0.8011970520019531,
"learning_rate": 1.7279999999999998e-06,
"loss": 1.1789,
"step": 3400
},
{
"epoch": 0.30976192583414464,
"grad_norm": 0.7374864816665649,
"learning_rate": 1.7199999999999998e-06,
"loss": 1.171,
"step": 3500
},
{
"epoch": 0.31861226657226305,
"grad_norm": 0.8267546892166138,
"learning_rate": 1.7119999999999999e-06,
"loss": 1.1435,
"step": 3600
},
{
"epoch": 0.32746260731038146,
"grad_norm": 0.8837570548057556,
"learning_rate": 1.704e-06,
"loss": 1.1674,
"step": 3700
},
{
"epoch": 0.33631294804849987,
"grad_norm": 0.7216938138008118,
"learning_rate": 1.696e-06,
"loss": 1.1446,
"step": 3800
},
{
"epoch": 0.3451632887866183,
"grad_norm": 0.8478527069091797,
"learning_rate": 1.6879999999999998e-06,
"loss": 1.1721,
"step": 3900
},
{
"epoch": 0.3540136295247367,
"grad_norm": 0.8281861543655396,
"learning_rate": 1.6799999999999998e-06,
"loss": 1.1741,
"step": 4000
},
{
"epoch": 0.3628639702628551,
"grad_norm": 0.6918274760246277,
"learning_rate": 1.6719999999999998e-06,
"loss": 1.1433,
"step": 4100
},
{
"epoch": 0.3717143110009735,
"grad_norm": 0.6722283363342285,
"learning_rate": 1.6639999999999999e-06,
"loss": 1.189,
"step": 4200
},
{
"epoch": 0.38056465173909193,
"grad_norm": 0.8337692022323608,
"learning_rate": 1.656e-06,
"loss": 1.1694,
"step": 4300
},
{
"epoch": 0.3894149924772104,
"grad_norm": 0.7980256080627441,
"learning_rate": 1.648e-06,
"loss": 1.1794,
"step": 4400
},
{
"epoch": 0.3982653332153288,
"grad_norm": 0.9903015494346619,
"learning_rate": 1.6399999999999998e-06,
"loss": 1.2027,
"step": 4500
},
{
"epoch": 0.4071156739534472,
"grad_norm": 0.7119297385215759,
"learning_rate": 1.6319999999999998e-06,
"loss": 1.1638,
"step": 4600
},
{
"epoch": 0.41596601469156563,
"grad_norm": 0.9170375466346741,
"learning_rate": 1.624e-06,
"loss": 1.1706,
"step": 4700
},
{
"epoch": 0.42481635542968404,
"grad_norm": 0.7098402976989746,
"learning_rate": 1.616e-06,
"loss": 1.1738,
"step": 4800
},
{
"epoch": 0.43366669616780246,
"grad_norm": 0.9326470494270325,
"learning_rate": 1.608e-06,
"loss": 1.1526,
"step": 4900
},
{
"epoch": 0.44251703690592087,
"grad_norm": 0.7765191197395325,
"learning_rate": 1.6e-06,
"loss": 1.1687,
"step": 5000
},
{
"epoch": 0.4513673776440393,
"grad_norm": 0.8953288197517395,
"learning_rate": 1.592e-06,
"loss": 1.1763,
"step": 5100
},
{
"epoch": 0.4602177183821577,
"grad_norm": 1.0437840223312378,
"learning_rate": 1.584e-06,
"loss": 1.1883,
"step": 5200
},
{
"epoch": 0.4690680591202761,
"grad_norm": 0.8432090282440186,
"learning_rate": 1.576e-06,
"loss": 1.1826,
"step": 5300
},
{
"epoch": 0.47791839985839457,
"grad_norm": 0.7580952048301697,
"learning_rate": 1.568e-06,
"loss": 1.1813,
"step": 5400
},
{
"epoch": 0.486768740596513,
"grad_norm": 0.9914817214012146,
"learning_rate": 1.5599999999999999e-06,
"loss": 1.1751,
"step": 5500
},
{
"epoch": 0.4956190813346314,
"grad_norm": 0.7643041610717773,
"learning_rate": 1.552e-06,
"loss": 1.1615,
"step": 5600
},
{
"epoch": 0.5044694220727498,
"grad_norm": 0.8275469541549683,
"learning_rate": 1.544e-06,
"loss": 1.188,
"step": 5700
},
{
"epoch": 0.5133197628108682,
"grad_norm": 1.950104832649231,
"learning_rate": 1.536e-06,
"loss": 1.2018,
"step": 5800
},
{
"epoch": 0.5221701035489866,
"grad_norm": 0.8191819787025452,
"learning_rate": 1.528e-06,
"loss": 1.1865,
"step": 5900
},
{
"epoch": 0.531020444287105,
"grad_norm": 0.7783402800559998,
"learning_rate": 1.5199999999999998e-06,
"loss": 1.1687,
"step": 6000
},
{
"epoch": 0.5398707850252235,
"grad_norm": 1.0385206937789917,
"learning_rate": 1.5119999999999999e-06,
"loss": 1.1883,
"step": 6100
},
{
"epoch": 0.5487211257633419,
"grad_norm": 0.9720994234085083,
"learning_rate": 1.504e-06,
"loss": 1.1824,
"step": 6200
},
{
"epoch": 0.5575714665014603,
"grad_norm": 0.9594699740409851,
"learning_rate": 1.496e-06,
"loss": 1.1908,
"step": 6300
},
{
"epoch": 0.5664218072395787,
"grad_norm": 0.9793533682823181,
"learning_rate": 1.488e-06,
"loss": 1.1385,
"step": 6400
},
{
"epoch": 0.5752721479776971,
"grad_norm": 0.9187788367271423,
"learning_rate": 1.48e-06,
"loss": 1.1536,
"step": 6500
},
{
"epoch": 0.5841224887158155,
"grad_norm": 0.9121326208114624,
"learning_rate": 1.4719999999999998e-06,
"loss": 1.1577,
"step": 6600
},
{
"epoch": 0.5929728294539339,
"grad_norm": 1.3253475427627563,
"learning_rate": 1.4639999999999999e-06,
"loss": 1.1462,
"step": 6700
},
{
"epoch": 0.6018231701920524,
"grad_norm": 0.9218689799308777,
"learning_rate": 1.456e-06,
"loss": 1.1745,
"step": 6800
},
{
"epoch": 0.6106735109301709,
"grad_norm": 0.8204641938209534,
"learning_rate": 1.448e-06,
"loss": 1.1871,
"step": 6900
},
{
"epoch": 0.6195238516682893,
"grad_norm": 0.8714615702629089,
"learning_rate": 1.44e-06,
"loss": 1.183,
"step": 7000
},
{
"epoch": 0.6283741924064077,
"grad_norm": 1.2564398050308228,
"learning_rate": 1.4319999999999998e-06,
"loss": 1.1614,
"step": 7100
},
{
"epoch": 0.6372245331445261,
"grad_norm": 0.8741295337677002,
"learning_rate": 1.4239999999999998e-06,
"loss": 1.1314,
"step": 7200
},
{
"epoch": 0.6460748738826445,
"grad_norm": 0.8480414152145386,
"learning_rate": 1.4159999999999999e-06,
"loss": 1.1522,
"step": 7300
},
{
"epoch": 0.6549252146207629,
"grad_norm": 1.0986804962158203,
"learning_rate": 1.408e-06,
"loss": 1.1885,
"step": 7400
},
{
"epoch": 0.6637755553588813,
"grad_norm": 0.9965903759002686,
"learning_rate": 1.4e-06,
"loss": 1.1804,
"step": 7500
},
{
"epoch": 0.6726258960969997,
"grad_norm": 1.079156756401062,
"learning_rate": 1.3919999999999998e-06,
"loss": 1.1797,
"step": 7600
},
{
"epoch": 0.6814762368351182,
"grad_norm": 1.0092281103134155,
"learning_rate": 1.3839999999999998e-06,
"loss": 1.1613,
"step": 7700
},
{
"epoch": 0.6903265775732366,
"grad_norm": 0.9988503456115723,
"learning_rate": 1.3759999999999998e-06,
"loss": 1.1526,
"step": 7800
},
{
"epoch": 0.699176918311355,
"grad_norm": 1.0048474073410034,
"learning_rate": 1.368e-06,
"loss": 1.1804,
"step": 7900
},
{
"epoch": 0.7080272590494734,
"grad_norm": 0.7530746459960938,
"learning_rate": 1.3600000000000001e-06,
"loss": 1.1465,
"step": 8000
},
{
"epoch": 0.7168775997875918,
"grad_norm": 0.6247321963310242,
"learning_rate": 1.352e-06,
"loss": 1.1385,
"step": 8100
},
{
"epoch": 0.7257279405257102,
"grad_norm": 0.7886361479759216,
"learning_rate": 1.344e-06,
"loss": 1.1857,
"step": 8200
},
{
"epoch": 0.7345782812638286,
"grad_norm": 0.8461400270462036,
"learning_rate": 1.336e-06,
"loss": 1.1612,
"step": 8300
},
{
"epoch": 0.743428622001947,
"grad_norm": 1.109844446182251,
"learning_rate": 1.328e-06,
"loss": 1.1406,
"step": 8400
},
{
"epoch": 0.7522789627400654,
"grad_norm": 0.8707802891731262,
"learning_rate": 1.32e-06,
"loss": 1.1719,
"step": 8500
},
{
"epoch": 0.7611293034781839,
"grad_norm": 0.8593458533287048,
"learning_rate": 1.312e-06,
"loss": 1.1696,
"step": 8600
},
{
"epoch": 0.7699796442163023,
"grad_norm": 0.8872305750846863,
"learning_rate": 1.304e-06,
"loss": 1.1772,
"step": 8700
},
{
"epoch": 0.7788299849544208,
"grad_norm": 0.9227966070175171,
"learning_rate": 1.296e-06,
"loss": 1.173,
"step": 8800
},
{
"epoch": 0.7876803256925392,
"grad_norm": 0.8818754553794861,
"learning_rate": 1.288e-06,
"loss": 1.1889,
"step": 8900
},
{
"epoch": 0.7965306664306576,
"grad_norm": 1.0658597946166992,
"learning_rate": 1.28e-06,
"loss": 1.1547,
"step": 9000
},
{
"epoch": 0.805381007168776,
"grad_norm": 0.8553236126899719,
"learning_rate": 1.272e-06,
"loss": 1.1172,
"step": 9100
},
{
"epoch": 0.8142313479068944,
"grad_norm": 1.120353102684021,
"learning_rate": 1.2639999999999999e-06,
"loss": 1.1649,
"step": 9200
},
{
"epoch": 0.8230816886450129,
"grad_norm": 0.734362006187439,
"learning_rate": 1.256e-06,
"loss": 1.1938,
"step": 9300
},
{
"epoch": 0.8319320293831313,
"grad_norm": 0.8664830923080444,
"learning_rate": 1.248e-06,
"loss": 1.1666,
"step": 9400
},
{
"epoch": 0.8407823701212497,
"grad_norm": 2.742342948913574,
"learning_rate": 1.24e-06,
"loss": 1.1807,
"step": 9500
},
{
"epoch": 0.8496327108593681,
"grad_norm": 1.302372694015503,
"learning_rate": 1.232e-06,
"loss": 1.1709,
"step": 9600
},
{
"epoch": 0.8584830515974865,
"grad_norm": 0.9084349870681763,
"learning_rate": 1.2239999999999998e-06,
"loss": 1.1405,
"step": 9700
},
{
"epoch": 0.8673333923356049,
"grad_norm": 0.8565220832824707,
"learning_rate": 1.2159999999999999e-06,
"loss": 1.1527,
"step": 9800
},
{
"epoch": 0.8761837330737233,
"grad_norm": 0.7832551002502441,
"learning_rate": 1.208e-06,
"loss": 1.1787,
"step": 9900
},
{
"epoch": 0.8850340738118417,
"grad_norm": 0.8366639018058777,
"learning_rate": 1.2e-06,
"loss": 1.1925,
"step": 10000
}
],
"logging_steps": 100,
"max_steps": 25000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.876647618221867e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}