{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.44251703690592087,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008850340738118417,
"grad_norm": 1.517469048500061,
"learning_rate": 1.9919999999999997e-06,
"loss": 1.519,
"step": 100
},
{
"epoch": 0.017700681476236834,
"grad_norm": 1.6662976741790771,
"learning_rate": 1.984e-06,
"loss": 1.4776,
"step": 200
},
{
"epoch": 0.026551022214355253,
"grad_norm": 0.8021761775016785,
"learning_rate": 1.976e-06,
"loss": 1.4026,
"step": 300
},
{
"epoch": 0.03540136295247367,
"grad_norm": 0.8772447109222412,
"learning_rate": 1.968e-06,
"loss": 1.3306,
"step": 400
},
{
"epoch": 0.04425170369059209,
"grad_norm": 0.4179963767528534,
"learning_rate": 1.96e-06,
"loss": 1.286,
"step": 500
},
{
"epoch": 0.053102044428710506,
"grad_norm": 0.4829399287700653,
"learning_rate": 1.9519999999999997e-06,
"loss": 1.2756,
"step": 600
},
{
"epoch": 0.061952385166828924,
"grad_norm": 0.5651261806488037,
"learning_rate": 1.944e-06,
"loss": 1.2721,
"step": 700
},
{
"epoch": 0.07080272590494734,
"grad_norm": 0.5160872936248779,
"learning_rate": 1.9359999999999998e-06,
"loss": 1.2579,
"step": 800
},
{
"epoch": 0.07965306664306576,
"grad_norm": 0.7501122355461121,
"learning_rate": 1.928e-06,
"loss": 1.2853,
"step": 900
},
{
"epoch": 0.08850340738118417,
"grad_norm": 0.591393232345581,
"learning_rate": 1.92e-06,
"loss": 1.2472,
"step": 1000
},
{
"epoch": 0.0973537481193026,
"grad_norm": 0.5573005080223083,
"learning_rate": 1.9119999999999997e-06,
"loss": 1.2348,
"step": 1100
},
{
"epoch": 0.10620408885742101,
"grad_norm": 0.607552707195282,
"learning_rate": 1.904e-06,
"loss": 1.2679,
"step": 1200
},
{
"epoch": 0.11505442959553942,
"grad_norm": 0.6899151802062988,
"learning_rate": 1.8959999999999997e-06,
"loss": 1.2513,
"step": 1300
},
{
"epoch": 0.12390477033365785,
"grad_norm": 0.5849478244781494,
"learning_rate": 1.8879999999999998e-06,
"loss": 1.226,
"step": 1400
},
{
"epoch": 0.13275511107177626,
"grad_norm": 0.6027043461799622,
"learning_rate": 1.8799999999999998e-06,
"loss": 1.2163,
"step": 1500
},
{
"epoch": 0.14160545180989467,
"grad_norm": 0.6785966157913208,
"learning_rate": 1.872e-06,
"loss": 1.2076,
"step": 1600
},
{
"epoch": 0.1504557925480131,
"grad_norm": 0.5975379347801208,
"learning_rate": 1.864e-06,
"loss": 1.2146,
"step": 1700
},
{
"epoch": 0.15930613328613152,
"grad_norm": 0.685453474521637,
"learning_rate": 1.856e-06,
"loss": 1.198,
"step": 1800
},
{
"epoch": 0.16815647402424994,
"grad_norm": 1.1250847578048706,
"learning_rate": 1.848e-06,
"loss": 1.218,
"step": 1900
},
{
"epoch": 0.17700681476236835,
"grad_norm": 0.6129019260406494,
"learning_rate": 1.84e-06,
"loss": 1.1894,
"step": 2000
},
{
"epoch": 0.18585715550048676,
"grad_norm": 0.7388463616371155,
"learning_rate": 1.832e-06,
"loss": 1.1962,
"step": 2100
},
{
"epoch": 0.1947074962386052,
"grad_norm": 0.5909475684165955,
"learning_rate": 1.824e-06,
"loss": 1.241,
"step": 2200
},
{
"epoch": 0.2035578369767236,
"grad_norm": 0.5264613032341003,
"learning_rate": 1.816e-06,
"loss": 1.1915,
"step": 2300
},
{
"epoch": 0.21240817771484202,
"grad_norm": 0.6419306397438049,
"learning_rate": 1.8079999999999999e-06,
"loss": 1.1938,
"step": 2400
},
{
"epoch": 0.22125851845296043,
"grad_norm": 0.6240788698196411,
"learning_rate": 1.8e-06,
"loss": 1.214,
"step": 2500
},
{
"epoch": 0.23010885919107885,
"grad_norm": 0.7954173684120178,
"learning_rate": 1.792e-06,
"loss": 1.195,
"step": 2600
},
{
"epoch": 0.23895919992919729,
"grad_norm": 0.7451553344726562,
"learning_rate": 1.784e-06,
"loss": 1.1793,
"step": 2700
},
{
"epoch": 0.2478095406673157,
"grad_norm": 0.8471085429191589,
"learning_rate": 1.776e-06,
"loss": 1.1895,
"step": 2800
},
{
"epoch": 0.2566598814054341,
"grad_norm": 1.0085513591766357,
"learning_rate": 1.7679999999999998e-06,
"loss": 1.2087,
"step": 2900
},
{
"epoch": 0.2655102221435525,
"grad_norm": 0.7709102630615234,
"learning_rate": 1.7599999999999999e-06,
"loss": 1.1959,
"step": 3000
},
{
"epoch": 0.27436056288167093,
"grad_norm": 0.7612162232398987,
"learning_rate": 1.752e-06,
"loss": 1.1803,
"step": 3100
},
{
"epoch": 0.28321090361978934,
"grad_norm": 1.0669310092926025,
"learning_rate": 1.744e-06,
"loss": 1.1548,
"step": 3200
},
{
"epoch": 0.29206124435790776,
"grad_norm": 0.7489383220672607,
"learning_rate": 1.736e-06,
"loss": 1.1654,
"step": 3300
},
{
"epoch": 0.3009115850960262,
"grad_norm": 0.8011970520019531,
"learning_rate": 1.7279999999999998e-06,
"loss": 1.1789,
"step": 3400
},
{
"epoch": 0.30976192583414464,
"grad_norm": 0.7374864816665649,
"learning_rate": 1.7199999999999998e-06,
"loss": 1.171,
"step": 3500
},
{
"epoch": 0.31861226657226305,
"grad_norm": 0.8267546892166138,
"learning_rate": 1.7119999999999999e-06,
"loss": 1.1435,
"step": 3600
},
{
"epoch": 0.32746260731038146,
"grad_norm": 0.8837570548057556,
"learning_rate": 1.704e-06,
"loss": 1.1674,
"step": 3700
},
{
"epoch": 0.33631294804849987,
"grad_norm": 0.7216938138008118,
"learning_rate": 1.696e-06,
"loss": 1.1446,
"step": 3800
},
{
"epoch": 0.3451632887866183,
"grad_norm": 0.8478527069091797,
"learning_rate": 1.6879999999999998e-06,
"loss": 1.1721,
"step": 3900
},
{
"epoch": 0.3540136295247367,
"grad_norm": 0.8281861543655396,
"learning_rate": 1.6799999999999998e-06,
"loss": 1.1741,
"step": 4000
},
{
"epoch": 0.3628639702628551,
"grad_norm": 0.6918274760246277,
"learning_rate": 1.6719999999999998e-06,
"loss": 1.1433,
"step": 4100
},
{
"epoch": 0.3717143110009735,
"grad_norm": 0.6722283363342285,
"learning_rate": 1.6639999999999999e-06,
"loss": 1.189,
"step": 4200
},
{
"epoch": 0.38056465173909193,
"grad_norm": 0.8337692022323608,
"learning_rate": 1.656e-06,
"loss": 1.1694,
"step": 4300
},
{
"epoch": 0.3894149924772104,
"grad_norm": 0.7980256080627441,
"learning_rate": 1.648e-06,
"loss": 1.1794,
"step": 4400
},
{
"epoch": 0.3982653332153288,
"grad_norm": 0.9903015494346619,
"learning_rate": 1.6399999999999998e-06,
"loss": 1.2027,
"step": 4500
},
{
"epoch": 0.4071156739534472,
"grad_norm": 0.7119297385215759,
"learning_rate": 1.6319999999999998e-06,
"loss": 1.1638,
"step": 4600
},
{
"epoch": 0.41596601469156563,
"grad_norm": 0.9170375466346741,
"learning_rate": 1.624e-06,
"loss": 1.1706,
"step": 4700
},
{
"epoch": 0.42481635542968404,
"grad_norm": 0.7098402976989746,
"learning_rate": 1.616e-06,
"loss": 1.1738,
"step": 4800
},
{
"epoch": 0.43366669616780246,
"grad_norm": 0.9326470494270325,
"learning_rate": 1.608e-06,
"loss": 1.1526,
"step": 4900
},
{
"epoch": 0.44251703690592087,
"grad_norm": 0.7765191197395325,
"learning_rate": 1.6e-06,
"loss": 1.1687,
"step": 5000
}
],
"logging_steps": 100,
"max_steps": 25000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.916766417699275e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}