{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 15.0,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 5.456483190001166,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.8054,
"step": 1
},
{
"epoch": 0.2,
"grad_norm": 5.759462172287989,
"learning_rate": 2.666666666666667e-06,
"loss": 0.8566,
"step": 2
},
{
"epoch": 0.3,
"grad_norm": 5.647493114157845,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8604,
"step": 3
},
{
"epoch": 0.4,
"grad_norm": 5.062774089894794,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8926,
"step": 4
},
{
"epoch": 0.5,
"grad_norm": 2.742178622247567,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8096,
"step": 5
},
{
"epoch": 0.6,
"grad_norm": 2.094214554696164,
"learning_rate": 8.000000000000001e-06,
"loss": 0.8214,
"step": 6
},
{
"epoch": 0.7,
"grad_norm": 4.578250564308243,
"learning_rate": 9.333333333333334e-06,
"loss": 0.8371,
"step": 7
},
{
"epoch": 0.8,
"grad_norm": 4.914224760901885,
"learning_rate": 1.0666666666666667e-05,
"loss": 0.8386,
"step": 8
},
{
"epoch": 0.9,
"grad_norm": 5.104636711377158,
"learning_rate": 1.2e-05,
"loss": 0.768,
"step": 9
},
{
"epoch": 1.0,
"grad_norm": 4.741500054760724,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.7601,
"step": 10
},
{
"epoch": 1.1,
"grad_norm": 3.0673965583798566,
"learning_rate": 1.4666666666666666e-05,
"loss": 0.6711,
"step": 11
},
{
"epoch": 1.2,
"grad_norm": 2.3130829408950597,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.6996,
"step": 12
},
{
"epoch": 1.3,
"grad_norm": 2.4521476942644096,
"learning_rate": 1.7333333333333336e-05,
"loss": 0.6883,
"step": 13
},
{
"epoch": 1.4,
"grad_norm": 2.060188555365567,
"learning_rate": 1.866666666666667e-05,
"loss": 0.6712,
"step": 14
},
{
"epoch": 1.5,
"grad_norm": 1.5880322665772053,
"learning_rate": 2e-05,
"loss": 0.6361,
"step": 15
},
{
"epoch": 1.6,
"grad_norm": 1.5065788447188164,
"learning_rate": 1.999729241179462e-05,
"loss": 0.6229,
"step": 16
},
{
"epoch": 1.7,
"grad_norm": 1.846665331936718,
"learning_rate": 1.998917111338525e-05,
"loss": 0.6138,
"step": 17
},
{
"epoch": 1.8,
"grad_norm": 1.397736554007089,
"learning_rate": 1.9975640502598243e-05,
"loss": 0.6224,
"step": 18
},
{
"epoch": 1.9,
"grad_norm": 1.250044207797159,
"learning_rate": 1.9956707906498046e-05,
"loss": 0.5978,
"step": 19
},
{
"epoch": 2.0,
"grad_norm": 1.1978733008887823,
"learning_rate": 1.9932383577419432e-05,
"loss": 0.5691,
"step": 20
},
{
"epoch": 2.1,
"grad_norm": 1.3616776529906953,
"learning_rate": 1.9902680687415704e-05,
"loss": 0.5554,
"step": 21
},
{
"epoch": 2.2,
"grad_norm": 1.097602231035553,
"learning_rate": 1.9867615321125796e-05,
"loss": 0.5255,
"step": 22
},
{
"epoch": 2.3,
"grad_norm": 1.0111196181375406,
"learning_rate": 1.9827206467064133e-05,
"loss": 0.4776,
"step": 23
},
{
"epoch": 2.4,
"grad_norm": 0.9955327255126635,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.4818,
"step": 24
},
{
"epoch": 2.5,
"grad_norm": 0.9140689471373215,
"learning_rate": 1.973044870579824e-05,
"loss": 0.4888,
"step": 25
},
{
"epoch": 2.6,
"grad_norm": 0.8950005541922588,
"learning_rate": 1.967415219462864e-05,
"loss": 0.4678,
"step": 26
},
{
"epoch": 2.7,
"grad_norm": 0.9264924413390109,
"learning_rate": 1.961261695938319e-05,
"loss": 0.4916,
"step": 27
},
{
"epoch": 2.8,
"grad_norm": 0.9016767081799504,
"learning_rate": 1.954587632247732e-05,
"loss": 0.4914,
"step": 28
},
{
"epoch": 2.9,
"grad_norm": 0.8711206171973317,
"learning_rate": 1.9473966425143292e-05,
"loss": 0.4279,
"step": 29
},
{
"epoch": 3.0,
"grad_norm": 0.965284052630944,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.4769,
"step": 30
},
{
"epoch": 3.1,
"grad_norm": 0.8710629657905165,
"learning_rate": 1.9314797389261426e-05,
"loss": 0.3557,
"step": 31
},
{
"epoch": 3.2,
"grad_norm": 0.7829249544686165,
"learning_rate": 1.9227624443554425e-05,
"loss": 0.3159,
"step": 32
},
{
"epoch": 3.3,
"grad_norm": 1.1372489228974627,
"learning_rate": 1.913545457642601e-05,
"loss": 0.3496,
"step": 33
},
{
"epoch": 3.4,
"grad_norm": 0.8970662832168831,
"learning_rate": 1.9038337699485207e-05,
"loss": 0.3805,
"step": 34
},
{
"epoch": 3.5,
"grad_norm": 1.0612014407477506,
"learning_rate": 1.8936326403234125e-05,
"loss": 0.3481,
"step": 35
},
{
"epoch": 3.6,
"grad_norm": 0.8370235615255618,
"learning_rate": 1.8829475928589272e-05,
"loss": 0.3333,
"step": 36
},
{
"epoch": 3.7,
"grad_norm": 1.128753299885166,
"learning_rate": 1.8717844136967626e-05,
"loss": 0.3398,
"step": 37
},
{
"epoch": 3.8,
"grad_norm": 1.0163020931362265,
"learning_rate": 1.860149147895366e-05,
"loss": 0.3192,
"step": 38
},
{
"epoch": 3.9,
"grad_norm": 0.7411794509431812,
"learning_rate": 1.848048096156426e-05,
"loss": 0.2694,
"step": 39
},
{
"epoch": 4.0,
"grad_norm": 0.9250164203908562,
"learning_rate": 1.8354878114129368e-05,
"loss": 0.2863,
"step": 40
},
{
"epoch": 4.1,
"grad_norm": 0.8417377767403931,
"learning_rate": 1.8224750952806626e-05,
"loss": 0.2318,
"step": 41
},
{
"epoch": 4.2,
"grad_norm": 0.8880509994076226,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.2463,
"step": 42
},
{
"epoch": 4.3,
"grad_norm": 0.8319508153001826,
"learning_rate": 1.795120796494848e-05,
"loss": 0.2142,
"step": 43
},
{
"epoch": 4.4,
"grad_norm": 0.9069642204701174,
"learning_rate": 1.7807940266766595e-05,
"loss": 0.1861,
"step": 44
},
{
"epoch": 4.5,
"grad_norm": 0.886648766459474,
"learning_rate": 1.766044443118978e-05,
"loss": 0.1981,
"step": 45
},
{
"epoch": 4.6,
"grad_norm": 0.7878272604729076,
"learning_rate": 1.7508800329814993e-05,
"loss": 0.1927,
"step": 46
},
{
"epoch": 4.7,
"grad_norm": 0.859960892022644,
"learning_rate": 1.735309008059829e-05,
"loss": 0.2185,
"step": 47
},
{
"epoch": 4.8,
"grad_norm": 0.8198360072603958,
"learning_rate": 1.7193398003386514e-05,
"loss": 0.1932,
"step": 48
},
{
"epoch": 4.9,
"grad_norm": 0.8197303574303446,
"learning_rate": 1.702981057425662e-05,
"loss": 0.1896,
"step": 49
},
{
"epoch": 5.0,
"grad_norm": 0.7095776631735655,
"learning_rate": 1.686241637868734e-05,
"loss": 0.1808,
"step": 50
},
{
"epoch": 5.1,
"grad_norm": 0.782000326482277,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.1546,
"step": 51
},
{
"epoch": 5.2,
"grad_norm": 0.757071977981014,
"learning_rate": 1.6516572288214555e-05,
"loss": 0.1219,
"step": 52
},
{
"epoch": 5.3,
"grad_norm": 0.7489429204163558,
"learning_rate": 1.63383096739871e-05,
"loss": 0.1047,
"step": 53
},
{
"epoch": 5.4,
"grad_norm": 0.7352612139748114,
"learning_rate": 1.6156614753256583e-05,
"loss": 0.1179,
"step": 54
},
{
"epoch": 5.5,
"grad_norm": 0.7350662711218962,
"learning_rate": 1.5971585917027864e-05,
"loss": 0.1137,
"step": 55
},
{
"epoch": 5.6,
"grad_norm": 0.7039275993910796,
"learning_rate": 1.5783323361679865e-05,
"loss": 0.1172,
"step": 56
},
{
"epoch": 5.7,
"grad_norm": 0.6910516646490666,
"learning_rate": 1.5591929034707468e-05,
"loss": 0.1024,
"step": 57
},
{
"epoch": 5.8,
"grad_norm": 0.7236787235958125,
"learning_rate": 1.539750657951513e-05,
"loss": 0.0989,
"step": 58
},
{
"epoch": 5.9,
"grad_norm": 0.6420220198866008,
"learning_rate": 1.5200161279292154e-05,
"loss": 0.1241,
"step": 59
},
{
"epoch": 6.0,
"grad_norm": 0.6203363523773471,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.0817,
"step": 60
},
{
"epoch": 6.1,
"grad_norm": 0.6104409465029903,
"learning_rate": 1.4797131132502464e-05,
"loss": 0.0601,
"step": 61
},
{
"epoch": 6.2,
"grad_norm": 0.5844775253639324,
"learning_rate": 1.4591664533870118e-05,
"loss": 0.0627,
"step": 62
},
{
"epoch": 6.3,
"grad_norm": 0.5069649786509103,
"learning_rate": 1.4383711467890776e-05,
"loss": 0.0587,
"step": 63
},
{
"epoch": 6.4,
"grad_norm": 0.8916378878545967,
"learning_rate": 1.417338454481818e-05,
"loss": 0.0836,
"step": 64
},
{
"epoch": 6.5,
"grad_norm": 0.6085732114627656,
"learning_rate": 1.396079766039157e-05,
"loss": 0.0444,
"step": 65
},
{
"epoch": 6.6,
"grad_norm": 0.8766545204586843,
"learning_rate": 1.3746065934159123e-05,
"loss": 0.0671,
"step": 66
},
{
"epoch": 6.7,
"grad_norm": 0.5289923674586601,
"learning_rate": 1.3529305647138689e-05,
"loss": 0.0544,
"step": 67
},
{
"epoch": 6.8,
"grad_norm": 0.5631884598314233,
"learning_rate": 1.3310634178849583e-05,
"loss": 0.0532,
"step": 68
},
{
"epoch": 6.9,
"grad_norm": 0.5869614627369972,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.0594,
"step": 69
},
{
"epoch": 7.0,
"grad_norm": 0.49999369328894544,
"learning_rate": 1.2868032327110904e-05,
"loss": 0.0547,
"step": 70
},
{
"epoch": 7.1,
"grad_norm": 0.443457075418473,
"learning_rate": 1.2644341620372025e-05,
"loss": 0.0329,
"step": 71
},
{
"epoch": 7.2,
"grad_norm": 0.4438435027066986,
"learning_rate": 1.2419218955996677e-05,
"loss": 0.0284,
"step": 72
},
{
"epoch": 7.3,
"grad_norm": 0.4800778119424111,
"learning_rate": 1.2192786241879033e-05,
"loss": 0.0404,
"step": 73
},
{
"epoch": 7.4,
"grad_norm": 0.4379474529157175,
"learning_rate": 1.1965166095328302e-05,
"loss": 0.0225,
"step": 74
},
{
"epoch": 7.5,
"grad_norm": 0.4429369566894146,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.0223,
"step": 75
},
{
"epoch": 7.6,
"grad_norm": 0.40432933172297614,
"learning_rate": 1.1506857122494832e-05,
"loss": 0.0244,
"step": 76
},
{
"epoch": 7.7,
"grad_norm": 0.48073981290705153,
"learning_rate": 1.127641647860595e-05,
"loss": 0.0319,
"step": 77
},
{
"epoch": 7.8,
"grad_norm": 0.5001498731871603,
"learning_rate": 1.1045284632676535e-05,
"loss": 0.0307,
"step": 78
},
{
"epoch": 7.9,
"grad_norm": 0.3920879064027223,
"learning_rate": 1.0813586746678584e-05,
"loss": 0.0254,
"step": 79
},
{
"epoch": 8.0,
"grad_norm": 0.4091992943404657,
"learning_rate": 1.0581448289104759e-05,
"loss": 0.0259,
"step": 80
},
{
"epoch": 8.1,
"grad_norm": 0.24591634835172133,
"learning_rate": 1.0348994967025012e-05,
"loss": 0.0129,
"step": 81
},
{
"epoch": 8.2,
"grad_norm": 0.27732329015357815,
"learning_rate": 1.0116352658013973e-05,
"loss": 0.0118,
"step": 82
},
{
"epoch": 8.3,
"grad_norm": 0.2737809179611153,
"learning_rate": 9.883647341986032e-06,
"loss": 0.0125,
"step": 83
},
{
"epoch": 8.4,
"grad_norm": 0.30306810527445566,
"learning_rate": 9.651005032974994e-06,
"loss": 0.0154,
"step": 84
},
{
"epoch": 8.5,
"grad_norm": 0.30379855260514727,
"learning_rate": 9.418551710895243e-06,
"loss": 0.0118,
"step": 85
},
{
"epoch": 8.6,
"grad_norm": 0.4811355659404676,
"learning_rate": 9.18641325332142e-06,
"loss": 0.0155,
"step": 86
},
{
"epoch": 8.7,
"grad_norm": 0.37455252522236027,
"learning_rate": 8.954715367323468e-06,
"loss": 0.0109,
"step": 87
},
{
"epoch": 8.8,
"grad_norm": 0.3382983231236897,
"learning_rate": 8.723583521394054e-06,
"loss": 0.0152,
"step": 88
},
{
"epoch": 8.9,
"grad_norm": 0.3297109614209282,
"learning_rate": 8.49314287750517e-06,
"loss": 0.0119,
"step": 89
},
{
"epoch": 9.0,
"grad_norm": 0.3116900824217769,
"learning_rate": 8.263518223330698e-06,
"loss": 0.0099,
"step": 90
},
{
"epoch": 9.1,
"grad_norm": 0.2048883072228153,
"learning_rate": 8.034833904671698e-06,
"loss": 0.0068,
"step": 91
},
{
"epoch": 9.2,
"grad_norm": 0.16372430312031436,
"learning_rate": 7.807213758120965e-06,
"loss": 0.0046,
"step": 92
},
{
"epoch": 9.3,
"grad_norm": 0.17126616465729205,
"learning_rate": 7.580781044003324e-06,
"loss": 0.0045,
"step": 93
},
{
"epoch": 9.4,
"grad_norm": 0.17140249123203252,
"learning_rate": 7.355658379627981e-06,
"loss": 0.0054,
"step": 94
},
{
"epoch": 9.5,
"grad_norm": 0.21145963284827615,
"learning_rate": 7.131967672889101e-06,
"loss": 0.0064,
"step": 95
},
{
"epoch": 9.6,
"grad_norm": 0.16836913651183916,
"learning_rate": 6.909830056250527e-06,
"loss": 0.0038,
"step": 96
},
{
"epoch": 9.7,
"grad_norm": 0.18115546955306835,
"learning_rate": 6.689365821150421e-06,
"loss": 0.0052,
"step": 97
},
{
"epoch": 9.8,
"grad_norm": 0.2577468160241198,
"learning_rate": 6.4706943528613135e-06,
"loss": 0.0056,
"step": 98
},
{
"epoch": 9.9,
"grad_norm": 0.19659235565523647,
"learning_rate": 6.25393406584088e-06,
"loss": 0.0042,
"step": 99
},
{
"epoch": 10.0,
"grad_norm": 0.23535354069396494,
"learning_rate": 6.039202339608432e-06,
"loss": 0.0043,
"step": 100
},
{
"epoch": 10.1,
"grad_norm": 0.08757863007098991,
"learning_rate": 5.8266154551818225e-06,
"loss": 0.0022,
"step": 101
},
{
"epoch": 10.2,
"grad_norm": 0.11715962745878235,
"learning_rate": 5.616288532109225e-06,
"loss": 0.0022,
"step": 102
},
{
"epoch": 10.3,
"grad_norm": 0.12361827660694866,
"learning_rate": 5.4083354661298816e-06,
"loss": 0.002,
"step": 103
},
{
"epoch": 10.4,
"grad_norm": 0.13828988254954486,
"learning_rate": 5.202868867497542e-06,
"loss": 0.0025,
"step": 104
},
{
"epoch": 10.5,
"grad_norm": 0.1215485981095267,
"learning_rate": 5.000000000000003e-06,
"loss": 0.002,
"step": 105
},
{
"epoch": 10.6,
"grad_norm": 0.10696031651620225,
"learning_rate": 4.799838720707847e-06,
"loss": 0.0024,
"step": 106
},
{
"epoch": 10.7,
"grad_norm": 0.11537485121928975,
"learning_rate": 4.6024934204848745e-06,
"loss": 0.0019,
"step": 107
},
{
"epoch": 10.8,
"grad_norm": 0.11477645379676313,
"learning_rate": 4.408070965292534e-06,
"loss": 0.0023,
"step": 108
},
{
"epoch": 10.9,
"grad_norm": 0.09407959791293473,
"learning_rate": 4.216676638320135e-06,
"loss": 0.0017,
"step": 109
},
{
"epoch": 11.0,
"grad_norm": 0.10539572481392215,
"learning_rate": 4.028414082972141e-06,
"loss": 0.0022,
"step": 110
},
{
"epoch": 11.1,
"grad_norm": 0.04526592878878114,
"learning_rate": 3.8433852467434175e-06,
"loss": 0.0013,
"step": 111
},
{
"epoch": 11.2,
"grad_norm": 0.04747157304079231,
"learning_rate": 3.661690326012897e-06,
"loss": 0.0012,
"step": 112
},
{
"epoch": 11.3,
"grad_norm": 0.05989856905170414,
"learning_rate": 3.483427711785449e-06,
"loss": 0.0014,
"step": 113
},
{
"epoch": 11.4,
"grad_norm": 0.07405131299600318,
"learning_rate": 3.308693936411421e-06,
"loss": 0.0012,
"step": 114
},
{
"epoch": 11.5,
"grad_norm": 0.036105812123230004,
"learning_rate": 3.1375836213126653e-06,
"loss": 0.0012,
"step": 115
},
{
"epoch": 11.6,
"grad_norm": 0.04336874203309867,
"learning_rate": 2.970189425743383e-06,
"loss": 0.0015,
"step": 116
},
{
"epoch": 11.7,
"grad_norm": 0.03911723515161731,
"learning_rate": 2.8066019966134907e-06,
"loss": 0.0014,
"step": 117
},
{
"epoch": 11.8,
"grad_norm": 0.06319472371081573,
"learning_rate": 2.6469099194017144e-06,
"loss": 0.0013,
"step": 118
},
{
"epoch": 11.9,
"grad_norm": 0.06203490181593157,
"learning_rate": 2.4911996701850083e-06,
"loss": 0.0012,
"step": 119
},
{
"epoch": 12.0,
"grad_norm": 0.038461603051341636,
"learning_rate": 2.339555568810221e-06,
"loss": 0.0011,
"step": 120
},
{
"epoch": 12.1,
"grad_norm": 0.03151892994828768,
"learning_rate": 2.192059733233408e-06,
"loss": 0.0011,
"step": 121
},
{
"epoch": 12.2,
"grad_norm": 0.026069986960984427,
"learning_rate": 2.048792035051521e-06,
"loss": 0.001,
"step": 122
},
{
"epoch": 12.3,
"grad_norm": 0.029353134476379206,
"learning_rate": 1.9098300562505266e-06,
"loss": 0.0011,
"step": 123
},
{
"epoch": 12.4,
"grad_norm": 0.026449026260907393,
"learning_rate": 1.7752490471933769e-06,
"loss": 0.001,
"step": 124
},
{
"epoch": 12.5,
"grad_norm": 0.024494407561961196,
"learning_rate": 1.6451218858706374e-06,
"loss": 0.001,
"step": 125
},
{
"epoch": 12.6,
"grad_norm": 0.02435497987035409,
"learning_rate": 1.5195190384357405e-06,
"loss": 0.0009,
"step": 126
},
{
"epoch": 12.7,
"grad_norm": 0.026479892781776853,
"learning_rate": 1.3985085210463479e-06,
"loss": 0.0008,
"step": 127
},
{
"epoch": 12.8,
"grad_norm": 0.02103523842744242,
"learning_rate": 1.282155863032377e-06,
"loss": 0.0009,
"step": 128
},
{
"epoch": 12.9,
"grad_norm": 0.026913938950377716,
"learning_rate": 1.1705240714107301e-06,
"loss": 0.0008,
"step": 129
},
{
"epoch": 13.0,
"grad_norm": 0.021497261239699632,
"learning_rate": 1.0636735967658785e-06,
"loss": 0.0009,
"step": 130
},
{
"epoch": 13.1,
"grad_norm": 0.01744937560052008,
"learning_rate": 9.616623005147952e-07,
"loss": 0.0008,
"step": 131
},
{
"epoch": 13.2,
"grad_norm": 0.022830147389962408,
"learning_rate": 8.645454235739903e-07,
"loss": 0.001,
"step": 132
},
{
"epoch": 13.3,
"grad_norm": 0.018549892449922493,
"learning_rate": 7.723755564455771e-07,
"loss": 0.0008,
"step": 133
},
{
"epoch": 13.4,
"grad_norm": 0.018499963246780587,
"learning_rate": 6.852026107385756e-07,
"loss": 0.0007,
"step": 134
},
{
"epoch": 13.5,
"grad_norm": 0.020186180806129512,
"learning_rate": 6.030737921409169e-07,
"loss": 0.0008,
"step": 135
},
{
"epoch": 13.6,
"grad_norm": 0.02160863937701566,
"learning_rate": 5.26033574856708e-07,
"loss": 0.0008,
"step": 136
},
{
"epoch": 13.7,
"grad_norm": 0.021380770597770406,
"learning_rate": 4.5412367752268094e-07,
"loss": 0.0009,
"step": 137
},
{
"epoch": 13.8,
"grad_norm": 0.017139391436868278,
"learning_rate": 3.8738304061681107e-07,
"loss": 0.0008,
"step": 138
},
{
"epoch": 13.9,
"grad_norm": 0.015543842724841007,
"learning_rate": 3.2584780537136206e-07,
"loss": 0.0006,
"step": 139
},
{
"epoch": 14.0,
"grad_norm": 0.023669058624946445,
"learning_rate": 2.6955129420176193e-07,
"loss": 0.0008,
"step": 140
},
{
"epoch": 14.1,
"grad_norm": 0.017273400705971477,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.0007,
"step": 141
},
{
"epoch": 14.2,
"grad_norm": 0.017728316737897203,
"learning_rate": 1.7279353293586765e-07,
"loss": 0.0008,
"step": 142
},
{
"epoch": 14.3,
"grad_norm": 0.019827798313299654,
"learning_rate": 1.323846788742078e-07,
"loss": 0.0009,
"step": 143
},
{
"epoch": 14.4,
"grad_norm": 0.016917306292850116,
"learning_rate": 9.731931258429638e-08,
"loss": 0.0007,
"step": 144
},
{
"epoch": 14.5,
"grad_norm": 0.01653817553682287,
"learning_rate": 6.761642258056977e-08,
"loss": 0.0007,
"step": 145
},
{
"epoch": 14.6,
"grad_norm": 0.03480683137636861,
"learning_rate": 4.329209350195651e-08,
"loss": 0.0007,
"step": 146
},
{
"epoch": 14.7,
"grad_norm": 0.020559555488437672,
"learning_rate": 2.4359497401758026e-08,
"loss": 0.001,
"step": 147
},
{
"epoch": 14.8,
"grad_norm": 0.018105533298564763,
"learning_rate": 1.0828886614754342e-08,
"loss": 0.0008,
"step": 148
},
{
"epoch": 14.9,
"grad_norm": 0.019042313810839078,
"learning_rate": 2.7075882053828605e-09,
"loss": 0.0008,
"step": 149
},
{
"epoch": 15.0,
"grad_norm": 0.018967228362383,
"learning_rate": 0.0,
"loss": 0.0008,
"step": 150
},
{
"epoch": 15.0,
"step": 150,
"total_flos": 8.481547119440691e+16,
"train_loss": 0.1808963591993476,
"train_runtime": 6243.0048,
"train_samples_per_second": 0.759,
"train_steps_per_second": 0.024
}
],
"logging_steps": 1.0,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.481547119440691e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}