{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.89010989010989,
"eval_steps": 500,
"global_step": 1810,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10989010989010989,
"grad_norm": 6.35939884185791,
"learning_rate": 2.1978021978021977e-05,
"loss": 1.3933,
"step": 10
},
{
"epoch": 0.21978021978021978,
"grad_norm": 3.3019938468933105,
"learning_rate": 4.3956043956043955e-05,
"loss": 0.6013,
"step": 20
},
{
"epoch": 0.32967032967032966,
"grad_norm": 1.7010605335235596,
"learning_rate": 6.593406593406594e-05,
"loss": 0.3661,
"step": 30
},
{
"epoch": 0.43956043956043955,
"grad_norm": 1.9770668745040894,
"learning_rate": 8.791208791208791e-05,
"loss": 0.2746,
"step": 40
},
{
"epoch": 0.5494505494505495,
"grad_norm": 1.2615512609481812,
"learning_rate": 0.0001098901098901099,
"loss": 0.2108,
"step": 50
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.9937906265258789,
"learning_rate": 0.00013186813186813188,
"loss": 0.1888,
"step": 60
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.7049073576927185,
"learning_rate": 0.00015384615384615385,
"loss": 0.166,
"step": 70
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.6901200413703918,
"learning_rate": 0.00017582417582417582,
"loss": 0.1526,
"step": 80
},
{
"epoch": 0.989010989010989,
"grad_norm": 1.3578896522521973,
"learning_rate": 0.0001978021978021978,
"loss": 0.1373,
"step": 90
},
{
"epoch": 1.098901098901099,
"grad_norm": 0.9918624758720398,
"learning_rate": 0.00019998647325745995,
"loss": 0.1303,
"step": 100
},
{
"epoch": 1.2087912087912087,
"grad_norm": 0.9448220133781433,
"learning_rate": 0.00019993971884561357,
"loss": 0.1236,
"step": 110
},
{
"epoch": 1.3186813186813187,
"grad_norm": 0.5491310358047485,
"learning_rate": 0.00019985958537951462,
"loss": 0.1164,
"step": 120
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.6634742617607117,
"learning_rate": 0.00019974609962308986,
"loss": 0.1024,
"step": 130
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.652184784412384,
"learning_rate": 0.00019959929947966,
"loss": 0.108,
"step": 140
},
{
"epoch": 1.6483516483516483,
"grad_norm": 0.42199188470840454,
"learning_rate": 0.00019941923397928047,
"loss": 0.0963,
"step": 150
},
{
"epoch": 1.7582417582417582,
"grad_norm": 0.7065568566322327,
"learning_rate": 0.0001992059632623657,
"loss": 0.099,
"step": 160
},
{
"epoch": 1.8681318681318682,
"grad_norm": 0.6244102120399475,
"learning_rate": 0.0001989595585596026,
"loss": 0.0897,
"step": 170
},
{
"epoch": 1.978021978021978,
"grad_norm": 0.6415707468986511,
"learning_rate": 0.00019868010216816034,
"loss": 0.0781,
"step": 180
},
{
"epoch": 2.087912087912088,
"grad_norm": 0.3453947603702545,
"learning_rate": 0.00019836768742420352,
"loss": 0.079,
"step": 190
},
{
"epoch": 2.197802197802198,
"grad_norm": 0.37823784351348877,
"learning_rate": 0.00019802241867171878,
"loss": 0.075,
"step": 200
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.8650999069213867,
"learning_rate": 0.00019764441122766476,
"loss": 0.0796,
"step": 210
},
{
"epoch": 2.4175824175824174,
"grad_norm": 0.4968554973602295,
"learning_rate": 0.000197233791343457,
"loss": 0.0754,
"step": 220
},
{
"epoch": 2.5274725274725274,
"grad_norm": 0.5782051682472229,
"learning_rate": 0.00019679069616280133,
"loss": 0.076,
"step": 230
},
{
"epoch": 2.6373626373626373,
"grad_norm": 0.4181021749973297,
"learning_rate": 0.00019631527367588864,
"loss": 0.0769,
"step": 240
},
{
"epoch": 2.7472527472527473,
"grad_norm": 0.41538622975349426,
"learning_rate": 0.00019580768266996756,
"loss": 0.0653,
"step": 250
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.3886262774467468,
"learning_rate": 0.00019526809267631073,
"loss": 0.0723,
"step": 260
},
{
"epoch": 2.967032967032967,
"grad_norm": 0.466183602809906,
"learning_rate": 0.0001946966839135928,
"loss": 0.0702,
"step": 270
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.34900644421577454,
"learning_rate": 0.00019409364722769882,
"loss": 0.0638,
"step": 280
},
{
"epoch": 3.186813186813187,
"grad_norm": 0.5098071694374084,
"learning_rate": 0.00019345918402798314,
"loss": 0.0645,
"step": 290
},
{
"epoch": 3.2967032967032965,
"grad_norm": 0.45593976974487305,
"learning_rate": 0.00019279350622000054,
"loss": 0.0691,
"step": 300
},
{
"epoch": 3.4065934065934065,
"grad_norm": 0.3932492733001709,
"learning_rate": 0.0001920968361347314,
"loss": 0.0706,
"step": 310
},
{
"epoch": 3.5164835164835164,
"grad_norm": 0.3770773708820343,
"learning_rate": 0.00019136940645432494,
"loss": 0.0646,
"step": 320
},
{
"epoch": 3.6263736263736264,
"grad_norm": 0.432841420173645,
"learning_rate": 0.00019061146013438527,
"loss": 0.0557,
"step": 330
},
{
"epoch": 3.7362637362637363,
"grad_norm": 0.5559947490692139,
"learning_rate": 0.00018982325032282615,
"loss": 0.0729,
"step": 340
},
{
"epoch": 3.8461538461538463,
"grad_norm": 0.5822933912277222,
"learning_rate": 0.00018900504027532185,
"loss": 0.0632,
"step": 350
},
{
"epoch": 3.956043956043956,
"grad_norm": 0.5372587442398071,
"learning_rate": 0.0001881571032673816,
"loss": 0.0666,
"step": 360
},
{
"epoch": 4.065934065934066,
"grad_norm": 0.4164665937423706,
"learning_rate": 0.000187279722503078,
"loss": 0.0566,
"step": 370
},
{
"epoch": 4.175824175824176,
"grad_norm": 0.570026695728302,
"learning_rate": 0.00018637319102045912,
"loss": 0.0621,
"step": 380
},
{
"epoch": 4.285714285714286,
"grad_norm": 0.3998757302761078,
"learning_rate": 0.00018543781159367607,
"loss": 0.0587,
"step": 390
},
{
"epoch": 4.395604395604396,
"grad_norm": 0.3779265880584717,
"learning_rate": 0.00018447389663185906,
"loss": 0.0568,
"step": 400
},
{
"epoch": 4.5054945054945055,
"grad_norm": 0.4073057174682617,
"learning_rate": 0.0001834817680747751,
"loss": 0.0615,
"step": 410
},
{
"epoch": 4.615384615384615,
"grad_norm": 0.3638319969177246,
"learning_rate": 0.00018246175728530296,
"loss": 0.0548,
"step": 420
},
{
"epoch": 4.725274725274725,
"grad_norm": 0.2937782108783722,
"learning_rate": 0.00018141420493876035,
"loss": 0.0601,
"step": 430
},
{
"epoch": 4.835164835164835,
"grad_norm": 0.4016660749912262,
"learning_rate": 0.00018033946090912148,
"loss": 0.0563,
"step": 440
},
{
"epoch": 4.945054945054945,
"grad_norm": 0.39952585101127625,
"learning_rate": 0.00017923788415216175,
"loss": 0.0554,
"step": 450
},
{
"epoch": 5.054945054945055,
"grad_norm": 0.3408401906490326,
"learning_rate": 0.00017810984258556957,
"loss": 0.0515,
"step": 460
},
{
"epoch": 5.164835164835165,
"grad_norm": 0.3611335754394531,
"learning_rate": 0.00017695571296606465,
"loss": 0.0511,
"step": 470
},
{
"epoch": 5.274725274725275,
"grad_norm": 0.24698808789253235,
"learning_rate": 0.00017577588076356465,
"loss": 0.0531,
"step": 480
},
{
"epoch": 5.384615384615385,
"grad_norm": 0.40450191497802734,
"learning_rate": 0.000174570740032441,
"loss": 0.048,
"step": 490
},
{
"epoch": 5.4945054945054945,
"grad_norm": 0.44147250056266785,
"learning_rate": 0.00017334069327990816,
"loss": 0.0545,
"step": 500
},
{
"epoch": 5.604395604395604,
"grad_norm": 0.45513996481895447,
"learning_rate": 0.00017208615133158946,
"loss": 0.0505,
"step": 510
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.3807731568813324,
"learning_rate": 0.00017080753319430452,
"loss": 0.0509,
"step": 520
},
{
"epoch": 5.824175824175824,
"grad_norm": 0.288381427526474,
"learning_rate": 0.00016950526591612462,
"loss": 0.0493,
"step": 530
},
{
"epoch": 5.934065934065934,
"grad_norm": 0.234198197722435,
"learning_rate": 0.00016817978444374194,
"loss": 0.0552,
"step": 540
},
{
"epoch": 6.043956043956044,
"grad_norm": 0.33030226826667786,
"learning_rate": 0.00016683153147720097,
"loss": 0.0453,
"step": 550
},
{
"epoch": 6.153846153846154,
"grad_norm": 0.30306366086006165,
"learning_rate": 0.00016546095732204022,
"loss": 0.0521,
"step": 560
},
{
"epoch": 6.263736263736264,
"grad_norm": 0.32483500242233276,
"learning_rate": 0.00016406851973889392,
"loss": 0.0473,
"step": 570
},
{
"epoch": 6.373626373626374,
"grad_norm": 0.3211219310760498,
"learning_rate": 0.00016265468379060365,
"loss": 0.0527,
"step": 580
},
{
"epoch": 6.483516483516484,
"grad_norm": 0.2360507994890213,
"learning_rate": 0.00016121992168689108,
"loss": 0.0449,
"step": 590
},
{
"epoch": 6.593406593406593,
"grad_norm": 0.3557351231575012,
"learning_rate": 0.00015976471262664382,
"loss": 0.0471,
"step": 600
},
{
"epoch": 6.7032967032967035,
"grad_norm": 0.3993636667728424,
"learning_rate": 0.00015828954263786687,
"loss": 0.0461,
"step": 610
},
{
"epoch": 6.813186813186813,
"grad_norm": 0.4391331076622009,
"learning_rate": 0.00015679490441535324,
"loss": 0.0431,
"step": 620
},
{
"epoch": 6.923076923076923,
"grad_norm": 0.311146080493927,
"learning_rate": 0.0001552812971561278,
"loss": 0.0497,
"step": 630
},
{
"epoch": 7.032967032967033,
"grad_norm": 0.29637405276298523,
"learning_rate": 0.00015374922639271963,
"loss": 0.0466,
"step": 640
},
{
"epoch": 7.142857142857143,
"grad_norm": 0.24265693128108978,
"learning_rate": 0.0001521992038243182,
"loss": 0.048,
"step": 650
},
{
"epoch": 7.252747252747253,
"grad_norm": 0.3331180214881897,
"learning_rate": 0.0001506317471458701,
"loss": 0.0444,
"step": 660
},
{
"epoch": 7.362637362637362,
"grad_norm": 0.29099053144454956,
"learning_rate": 0.00014904737987517293,
"loss": 0.0446,
"step": 670
},
{
"epoch": 7.472527472527473,
"grad_norm": 0.26169735193252563,
"learning_rate": 0.00014744663117802475,
"loss": 0.0442,
"step": 680
},
{
"epoch": 7.582417582417582,
"grad_norm": 0.27879634499549866,
"learning_rate": 0.00014583003569148712,
"loss": 0.0424,
"step": 690
},
{
"epoch": 7.6923076923076925,
"grad_norm": 0.3208893835544586,
"learning_rate": 0.00014419813334532036,
"loss": 0.0421,
"step": 700
},
{
"epoch": 7.802197802197802,
"grad_norm": 0.2651744484901428,
"learning_rate": 0.0001425514691816516,
"loss": 0.046,
"step": 710
},
{
"epoch": 7.912087912087912,
"grad_norm": 0.30851149559020996,
"learning_rate": 0.00014089059317293524,
"loss": 0.046,
"step": 720
},
{
"epoch": 8.021978021978022,
"grad_norm": 0.32210397720336914,
"learning_rate": 0.00013921606003826627,
"loss": 0.0448,
"step": 730
},
{
"epoch": 8.131868131868131,
"grad_norm": 0.49959078431129456,
"learning_rate": 0.00013752842905810896,
"loss": 0.0447,
"step": 740
},
{
"epoch": 8.241758241758241,
"grad_norm": 0.5785067677497864,
"learning_rate": 0.00013582826388750153,
"loss": 0.0397,
"step": 750
},
{
"epoch": 8.351648351648352,
"grad_norm": 0.28836700320243835,
"learning_rate": 0.00013411613236779995,
"loss": 0.0493,
"step": 760
},
{
"epoch": 8.461538461538462,
"grad_norm": 0.26861655712127686,
"learning_rate": 0.00013239260633702364,
"loss": 0.0442,
"step": 770
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.3270936608314514,
"learning_rate": 0.00013065826143886616,
"loss": 0.0489,
"step": 780
},
{
"epoch": 8.68131868131868,
"grad_norm": 0.24692483246326447,
"learning_rate": 0.00012891367693043478,
"loss": 0.0441,
"step": 790
},
{
"epoch": 8.791208791208792,
"grad_norm": 0.32261839509010315,
"learning_rate": 0.00012715943548878355,
"loss": 0.042,
"step": 800
},
{
"epoch": 8.901098901098901,
"grad_norm": 0.22817495465278625,
"learning_rate": 0.00012539612301630377,
"loss": 0.0415,
"step": 810
},
{
"epoch": 9.010989010989011,
"grad_norm": 0.16383874416351318,
"learning_rate": 0.00012362432844503725,
"loss": 0.0366,
"step": 820
},
{
"epoch": 9.12087912087912,
"grad_norm": 0.35975322127342224,
"learning_rate": 0.00012184464353997796,
"loss": 0.0424,
"step": 830
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.22110863029956818,
"learning_rate": 0.00012005766270142724,
"loss": 0.0334,
"step": 840
},
{
"epoch": 9.340659340659341,
"grad_norm": 0.26277679204940796,
"learning_rate": 0.00011826398276646897,
"loss": 0.041,
"step": 850
},
{
"epoch": 9.45054945054945,
"grad_norm": 0.29648274183273315,
"learning_rate": 0.00011646420280963081,
"loss": 0.0358,
"step": 860
},
{
"epoch": 9.56043956043956,
"grad_norm": 0.27250102162361145,
"learning_rate": 0.00011465892394279814,
"loss": 0.0402,
"step": 870
},
{
"epoch": 9.67032967032967,
"grad_norm": 0.24232593178749084,
"learning_rate": 0.00011284874911444763,
"loss": 0.0372,
"step": 880
},
{
"epoch": 9.780219780219781,
"grad_norm": 0.25964832305908203,
"learning_rate": 0.00011103428290826736,
"loss": 0.0427,
"step": 890
},
{
"epoch": 9.89010989010989,
"grad_norm": 0.31330835819244385,
"learning_rate": 0.00010921613134123068,
"loss": 0.0397,
"step": 900
},
{
"epoch": 10.0,
"grad_norm": 0.3337639570236206,
"learning_rate": 0.00010739490166119155,
"loss": 0.0363,
"step": 910
},
{
"epoch": 10.10989010989011,
"grad_norm": 0.1793065071105957,
"learning_rate": 0.00010557120214406875,
"loss": 0.0341,
"step": 920
},
{
"epoch": 10.219780219780219,
"grad_norm": 0.32348737120628357,
"learning_rate": 0.00010374564189068641,
"loss": 0.0381,
"step": 930
},
{
"epoch": 10.32967032967033,
"grad_norm": 0.3930850625038147,
"learning_rate": 0.00010191883062333965,
"loss": 0.0398,
"step": 940
},
{
"epoch": 10.43956043956044,
"grad_norm": 0.2554318308830261,
"learning_rate": 0.00010009137848215195,
"loss": 0.034,
"step": 950
},
{
"epoch": 10.54945054945055,
"grad_norm": 0.2729535698890686,
"learning_rate": 9.826389582129352e-05,
"loss": 0.0408,
"step": 960
},
{
"epoch": 10.659340659340659,
"grad_norm": 0.2841566503047943,
"learning_rate": 9.64369930051278e-05,
"loss": 0.0373,
"step": 970
},
{
"epoch": 10.76923076923077,
"grad_norm": 0.18831486999988556,
"learning_rate": 9.4611280204355e-05,
"loss": 0.0354,
"step": 980
},
{
"epoch": 10.87912087912088,
"grad_norm": 0.25787901878356934,
"learning_rate": 9.278736719221964e-05,
"loss": 0.0357,
"step": 990
},
{
"epoch": 10.989010989010989,
"grad_norm": 0.268319696187973,
"learning_rate": 9.096586314085162e-05,
"loss": 0.0357,
"step": 1000
},
{
"epoch": 11.098901098901099,
"grad_norm": 0.29258257150650024,
"learning_rate": 8.914737641780738e-05,
"loss": 0.0298,
"step": 1010
},
{
"epoch": 11.208791208791208,
"grad_norm": 0.26910606026649475,
"learning_rate": 8.733251438288032e-05,
"loss": 0.0385,
"step": 1020
},
{
"epoch": 11.31868131868132,
"grad_norm": 0.275666207075119,
"learning_rate": 8.552188318524737e-05,
"loss": 0.0371,
"step": 1030
},
{
"epoch": 11.428571428571429,
"grad_norm": 0.3032730221748352,
"learning_rate": 8.371608756102028e-05,
"loss": 0.0298,
"step": 1040
},
{
"epoch": 11.538461538461538,
"grad_norm": 0.19398105144500732,
"learning_rate": 8.191573063126842e-05,
"loss": 0.032,
"step": 1050
},
{
"epoch": 11.648351648351648,
"grad_norm": 0.23560473322868347,
"learning_rate": 8.012141370058151e-05,
"loss": 0.0294,
"step": 1060
},
{
"epoch": 11.758241758241759,
"grad_norm": 0.24311786890029907,
"learning_rate": 7.833373605623855e-05,
"loss": 0.034,
"step": 1070
},
{
"epoch": 11.868131868131869,
"grad_norm": 0.1695002168416977,
"learning_rate": 7.655329476805095e-05,
"loss": 0.0345,
"step": 1080
},
{
"epoch": 11.978021978021978,
"grad_norm": 0.1906553953886032,
"learning_rate": 7.478068448894577e-05,
"loss": 0.03,
"step": 1090
},
{
"epoch": 12.087912087912088,
"grad_norm": 0.18722419440746307,
"learning_rate": 7.301649725635689e-05,
"loss": 0.0327,
"step": 1100
},
{
"epoch": 12.197802197802197,
"grad_norm": 0.2109653502702713,
"learning_rate": 7.126132229448883e-05,
"loss": 0.03,
"step": 1110
},
{
"epoch": 12.307692307692308,
"grad_norm": 0.3317616879940033,
"learning_rate": 6.95157458175211e-05,
"loss": 0.0289,
"step": 1120
},
{
"epoch": 12.417582417582418,
"grad_norm": 0.18462032079696655,
"learning_rate": 6.778035083381708e-05,
"loss": 0.0336,
"step": 1130
},
{
"epoch": 12.527472527472527,
"grad_norm": 0.17815662920475006,
"learning_rate": 6.60557169512042e-05,
"loss": 0.0317,
"step": 1140
},
{
"epoch": 12.637362637362637,
"grad_norm": 0.19909383356571198,
"learning_rate": 6.434242018338947e-05,
"loss": 0.0317,
"step": 1150
},
{
"epoch": 12.747252747252748,
"grad_norm": 0.21948197484016418,
"learning_rate": 6.264103275757578e-05,
"loss": 0.0288,
"step": 1160
},
{
"epoch": 12.857142857142858,
"grad_norm": 0.30106085538864136,
"learning_rate": 6.095212292334232e-05,
"loss": 0.0298,
"step": 1170
},
{
"epoch": 12.967032967032967,
"grad_norm": 0.3014363944530487,
"learning_rate": 5.927625476285426e-05,
"loss": 0.0318,
"step": 1180
},
{
"epoch": 13.076923076923077,
"grad_norm": 0.5095160007476807,
"learning_rate": 5.761398800246354e-05,
"loss": 0.0272,
"step": 1190
},
{
"epoch": 13.186813186813186,
"grad_norm": 0.19524632394313812,
"learning_rate": 5.596587782576509e-05,
"loss": 0.0294,
"step": 1200
},
{
"epoch": 13.296703296703297,
"grad_norm": 0.20751602947711945,
"learning_rate": 5.433247468816977e-05,
"loss": 0.0295,
"step": 1210
},
{
"epoch": 13.406593406593407,
"grad_norm": 0.12134165316820145,
"learning_rate": 5.271432413305687e-05,
"loss": 0.0258,
"step": 1220
},
{
"epoch": 13.516483516483516,
"grad_norm": 0.2958245277404785,
"learning_rate": 5.111196660956703e-05,
"loss": 0.0328,
"step": 1230
},
{
"epoch": 13.626373626373626,
"grad_norm": 0.1860036551952362,
"learning_rate": 4.952593729209671e-05,
"loss": 0.028,
"step": 1240
},
{
"epoch": 13.736263736263737,
"grad_norm": 0.16853293776512146,
"learning_rate": 4.7956765901554065e-05,
"loss": 0.0259,
"step": 1250
},
{
"epoch": 13.846153846153847,
"grad_norm": 0.22942283749580383,
"learning_rate": 4.640497652843672e-05,
"loss": 0.0303,
"step": 1260
},
{
"epoch": 13.956043956043956,
"grad_norm": 0.1646496206521988,
"learning_rate": 4.4871087457789584e-05,
"loss": 0.0263,
"step": 1270
},
{
"epoch": 14.065934065934066,
"grad_norm": 0.22191093862056732,
"learning_rate": 4.3355610996102e-05,
"loss": 0.033,
"step": 1280
},
{
"epoch": 14.175824175824175,
"grad_norm": 0.19444125890731812,
"learning_rate": 4.185905330020143e-05,
"loss": 0.0268,
"step": 1290
},
{
"epoch": 14.285714285714286,
"grad_norm": 0.16687805950641632,
"learning_rate": 4.038191420820139e-05,
"loss": 0.0293,
"step": 1300
},
{
"epoch": 14.395604395604396,
"grad_norm": 0.21555913984775543,
"learning_rate": 3.892468707255923e-05,
"loss": 0.0269,
"step": 1310
},
{
"epoch": 14.505494505494505,
"grad_norm": 0.1728578358888626,
"learning_rate": 3.7487858595300884e-05,
"loss": 0.0222,
"step": 1320
},
{
"epoch": 14.615384615384615,
"grad_norm": 0.25217944383621216,
"learning_rate": 3.607190866546578e-05,
"loss": 0.023,
"step": 1330
},
{
"epoch": 14.725274725274724,
"grad_norm": 0.45261967182159424,
"learning_rate": 3.467731019882838e-05,
"loss": 0.0366,
"step": 1340
},
{
"epoch": 14.835164835164836,
"grad_norm": 0.19587543606758118,
"learning_rate": 3.330452897994773e-05,
"loss": 0.0249,
"step": 1350
},
{
"epoch": 14.945054945054945,
"grad_norm": 0.16712340712547302,
"learning_rate": 3.195402350659945e-05,
"loss": 0.0291,
"step": 1360
},
{
"epoch": 15.054945054945055,
"grad_norm": 0.3442732095718384,
"learning_rate": 3.0626244836640993e-05,
"loss": 0.0267,
"step": 1370
},
{
"epoch": 15.164835164835164,
"grad_norm": 0.20348604023456573,
"learning_rate": 2.9321636437362253e-05,
"loss": 0.0255,
"step": 1380
},
{
"epoch": 15.274725274725276,
"grad_norm": 0.18801550567150116,
"learning_rate": 2.8040634037370728e-05,
"loss": 0.0247,
"step": 1390
},
{
"epoch": 15.384615384615385,
"grad_norm": 0.2544891834259033,
"learning_rate": 2.6783665481062113e-05,
"loss": 0.0275,
"step": 1400
},
{
"epoch": 15.494505494505495,
"grad_norm": 0.3431093394756317,
"learning_rate": 2.5551150585723415e-05,
"loss": 0.0238,
"step": 1410
},
{
"epoch": 15.604395604395604,
"grad_norm": 0.17770768702030182,
"learning_rate": 2.4343501001317605e-05,
"loss": 0.0251,
"step": 1420
},
{
"epoch": 15.714285714285714,
"grad_norm": 0.170986145734787,
"learning_rate": 2.3161120072995757e-05,
"loss": 0.0243,
"step": 1430
},
{
"epoch": 15.824175824175825,
"grad_norm": 0.19961877167224884,
"learning_rate": 2.2004402706383043e-05,
"loss": 0.0237,
"step": 1440
},
{
"epoch": 15.934065934065934,
"grad_norm": 0.38295048475265503,
"learning_rate": 2.0873735235683535e-05,
"loss": 0.0239,
"step": 1450
},
{
"epoch": 16.043956043956044,
"grad_norm": 0.15348948538303375,
"learning_rate": 1.976949529464771e-05,
"loss": 0.0282,
"step": 1460
},
{
"epoch": 16.153846153846153,
"grad_norm": 0.3392385244369507,
"learning_rate": 1.8692051690445743e-05,
"loss": 0.0283,
"step": 1470
},
{
"epoch": 16.263736263736263,
"grad_norm": 0.15572497248649597,
"learning_rate": 1.764176428048908e-05,
"loss": 0.0245,
"step": 1480
},
{
"epoch": 16.373626373626372,
"grad_norm": 0.2224312424659729,
"learning_rate": 1.661898385224079e-05,
"loss": 0.0258,
"step": 1490
},
{
"epoch": 16.483516483516482,
"grad_norm": 0.17235592007637024,
"learning_rate": 1.5624052006055544e-05,
"loss": 0.0248,
"step": 1500
},
{
"epoch": 16.593406593406595,
"grad_norm": 0.17149613797664642,
"learning_rate": 1.4657301041087812e-05,
"loss": 0.0243,
"step": 1510
},
{
"epoch": 16.703296703296704,
"grad_norm": 0.18543320894241333,
"learning_rate": 1.3719053844306706e-05,
"loss": 0.0272,
"step": 1520
},
{
"epoch": 16.813186813186814,
"grad_norm": 0.1908874362707138,
"learning_rate": 1.2809623782654223e-05,
"loss": 0.0235,
"step": 1530
},
{
"epoch": 16.923076923076923,
"grad_norm": 0.18022581934928894,
"learning_rate": 1.1929314598383423e-05,
"loss": 0.0263,
"step": 1540
},
{
"epoch": 17.032967032967033,
"grad_norm": 0.1982187032699585,
"learning_rate": 1.1078420307610783e-05,
"loss": 0.0252,
"step": 1550
},
{
"epoch": 17.142857142857142,
"grad_norm": 0.1451161950826645,
"learning_rate": 1.0257225102117363e-05,
"loss": 0.025,
"step": 1560
},
{
"epoch": 17.252747252747252,
"grad_norm": 0.1547311544418335,
"learning_rate": 9.466003254430934e-06,
"loss": 0.024,
"step": 1570
},
{
"epoch": 17.36263736263736,
"grad_norm": 0.20238442718982697,
"learning_rate": 8.705019026221317e-06,
"loss": 0.0249,
"step": 1580
},
{
"epoch": 17.47252747252747,
"grad_norm": 0.18659718334674835,
"learning_rate": 7.974526580038988e-06,
"loss": 0.0257,
"step": 1590
},
{
"epoch": 17.582417582417584,
"grad_norm": 0.2173370122909546,
"learning_rate": 7.2747698944269915e-06,
"loss": 0.0248,
"step": 1600
},
{
"epoch": 17.692307692307693,
"grad_norm": 0.12479448318481445,
"learning_rate": 6.605982682433953e-06,
"loss": 0.0263,
"step": 1610
},
{
"epoch": 17.802197802197803,
"grad_norm": 0.17071430385112762,
"learning_rate": 5.968388313555895e-06,
"loss": 0.0225,
"step": 1620
},
{
"epoch": 17.912087912087912,
"grad_norm": 0.2871854901313782,
"learning_rate": 5.3621997391326565e-06,
"loss": 0.0191,
"step": 1630
},
{
"epoch": 18.021978021978022,
"grad_norm": 0.39435771107673645,
"learning_rate": 4.7876194212238434e-06,
"loss": 0.0291,
"step": 1640
},
{
"epoch": 18.13186813186813,
"grad_norm": 0.18047714233398438,
"learning_rate": 4.24483926498812e-06,
"loss": 0.0207,
"step": 1650
},
{
"epoch": 18.24175824175824,
"grad_norm": 0.19172823429107666,
"learning_rate": 3.734040554588514e-06,
"loss": 0.0214,
"step": 1660
},
{
"epoch": 18.35164835164835,
"grad_norm": 0.2549908757209778,
"learning_rate": 3.255393892644909e-06,
"loss": 0.0205,
"step": 1670
},
{
"epoch": 18.46153846153846,
"grad_norm": 0.21838967502117157,
"learning_rate": 2.8090591432542e-06,
"loss": 0.0214,
"step": 1680
},
{
"epoch": 18.571428571428573,
"grad_norm": 0.2188936024904251,
"learning_rate": 2.3951853785969537e-06,
"loss": 0.0227,
"step": 1690
},
{
"epoch": 18.681318681318682,
"grad_norm": 0.15396256744861603,
"learning_rate": 2.0139108291485574e-06,
"loss": 0.0286,
"step": 1700
},
{
"epoch": 18.791208791208792,
"grad_norm": 0.22776196897029877,
"learning_rate": 1.6653628375112972e-06,
"loss": 0.0248,
"step": 1710
},
{
"epoch": 18.9010989010989,
"grad_norm": 0.17969872057437897,
"learning_rate": 1.349657815883032e-06,
"loss": 0.0227,
"step": 1720
},
{
"epoch": 19.01098901098901,
"grad_norm": 0.22254760563373566,
"learning_rate": 1.0669012071764073e-06,
"loss": 0.0197,
"step": 1730
},
{
"epoch": 19.12087912087912,
"grad_norm": 0.1676974892616272,
"learning_rate": 8.171874498018039e-07,
"loss": 0.0264,
"step": 1740
},
{
"epoch": 19.23076923076923,
"grad_norm": 0.21863488852977753,
"learning_rate": 6.005999461256684e-07,
"loss": 0.0224,
"step": 1750
},
{
"epoch": 19.34065934065934,
"grad_norm": 0.13759975135326385,
"learning_rate": 4.172110346148506e-07,
"loss": 0.0265,
"step": 1760
},
{
"epoch": 19.45054945054945,
"grad_norm": 0.16070395708084106,
"learning_rate": 2.670819656760526e-07,
"loss": 0.0233,
"step": 1770
},
{
"epoch": 19.560439560439562,
"grad_norm": 0.2525140643119812,
"learning_rate": 1.502628811987483e-07,
"loss": 0.0241,
"step": 1780
},
{
"epoch": 19.67032967032967,
"grad_norm": 0.16685707867145538,
"learning_rate": 6.679279780821279e-08,
"loss": 0.0248,
"step": 1790
},
{
"epoch": 19.78021978021978,
"grad_norm": 0.17456871271133423,
"learning_rate": 1.6699593834224036e-08,
"loss": 0.0259,
"step": 1800
},
{
"epoch": 19.89010989010989,
"grad_norm": 0.18164990842342377,
"learning_rate": 0.0,
"loss": 0.0233,
"step": 1810
},
{
"epoch": 19.89010989010989,
"step": 1810,
"total_flos": 2.654273592491328e+17,
"train_loss": 0.05974875727246479,
"train_runtime": 2083.4502,
"train_samples_per_second": 55.6,
"train_steps_per_second": 0.869
}
],
"logging_steps": 10,
"max_steps": 1810,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.654273592491328e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}