T-lite-it-1.0-pseudo-base / trainer_state.json
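
For reference, the trainer state below can be consumed with the Python standard library alone. The following is a minimal sketch, assuming the file has been saved locally as trainer_state.json (the path is illustrative): it splits the log_history entries into training records (which carry "loss") and evaluation records (which carry "eval_loss") and prints the evaluation curve.

import json

# Minimal sketch: load the Trainer state and summarize the eval curve.
# The path below is an assumption; point it at a local copy of this file.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"train log points: {len(train_logs)}, eval points: {len(eval_logs)}")
for e in eval_logs:
    print(f"step {e['step']:>5}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_accuracy={e['eval_accuracy']:.4f}")
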
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.50003543837267,
"eval_steps": 500,
"global_step": 7055,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.0876745339854e-05,
"eval_accuracy": 0.6605764392507908,
"eval_loss": 1.4750796556472778,
"eval_runtime": 7.2429,
"eval_samples_per_second": 47.909,
"eval_steps_per_second": 0.414,
"step": 1
},
{
"epoch": 0.00070876745339854,
"grad_norm": 1.8515625,
"learning_rate": 1e-05,
"loss": 1.538,
"step": 10
},
{
"epoch": 0.00141753490679708,
"grad_norm": 1.0,
"learning_rate": 2e-05,
"loss": 1.5311,
"step": 20
},
{
"epoch": 0.00212630236019562,
"grad_norm": 0.57421875,
"learning_rate": 3e-05,
"loss": 1.5247,
"step": 30
},
{
"epoch": 0.00283506981359416,
"grad_norm": 0.423828125,
"learning_rate": 4e-05,
"loss": 1.531,
"step": 40
},
{
"epoch": 0.0035438372669927,
"grad_norm": 0.451171875,
"learning_rate": 5e-05,
"loss": 1.5146,
"step": 50
},
{
"epoch": 0.00425260472039124,
"grad_norm": 0.453125,
"learning_rate": 6e-05,
"loss": 1.5055,
"step": 60
},
{
"epoch": 0.00496137217378978,
"grad_norm": 0.515625,
"learning_rate": 7e-05,
"loss": 1.5345,
"step": 70
},
{
"epoch": 0.00567013962718832,
"grad_norm": 0.53515625,
"learning_rate": 8e-05,
"loss": 1.5133,
"step": 80
},
{
"epoch": 0.00637890708058686,
"grad_norm": 0.58984375,
"learning_rate": 9e-05,
"loss": 1.5043,
"step": 90
},
{
"epoch": 0.0070876745339854,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.513,
"step": 100
},
{
"epoch": 0.00779644198738394,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.5083,
"step": 110
},
{
"epoch": 0.00850520944078248,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.5098,
"step": 120
},
{
"epoch": 0.00921397689418102,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4936,
"step": 130
},
{
"epoch": 0.00992274434757956,
"grad_norm": 0.54296875,
"learning_rate": 0.0001,
"loss": 1.5091,
"step": 140
},
{
"epoch": 0.0106315118009781,
"grad_norm": 0.439453125,
"learning_rate": 0.0001,
"loss": 1.5073,
"step": 150
},
{
"epoch": 0.01134027925437664,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.5161,
"step": 160
},
{
"epoch": 0.01204904670777518,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.5067,
"step": 170
},
{
"epoch": 0.01275781416117372,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4984,
"step": 180
},
{
"epoch": 0.01346658161457226,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.5053,
"step": 190
},
{
"epoch": 0.0141753490679708,
"grad_norm": 0.478515625,
"learning_rate": 0.0001,
"loss": 1.4937,
"step": 200
},
{
"epoch": 0.01488411652136934,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4946,
"step": 210
},
{
"epoch": 0.01559288397476788,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.5031,
"step": 220
},
{
"epoch": 0.01630165142816642,
"grad_norm": 0.6015625,
"learning_rate": 0.0001,
"loss": 1.4857,
"step": 230
},
{
"epoch": 0.01701041888156496,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5134,
"step": 240
},
{
"epoch": 0.0177191863349635,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.5069,
"step": 250
},
{
"epoch": 0.01842795378836204,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5037,
"step": 260
},
{
"epoch": 0.01913672124176058,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.4999,
"step": 270
},
{
"epoch": 0.01984548869515912,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.5038,
"step": 280
},
{
"epoch": 0.02055425614855766,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4985,
"step": 290
},
{
"epoch": 0.0212630236019562,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.5112,
"step": 300
},
{
"epoch": 0.02197179105535474,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.5083,
"step": 310
},
{
"epoch": 0.02268055850875328,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.5104,
"step": 320
},
{
"epoch": 0.02338932596215182,
"grad_norm": 0.49609375,
"learning_rate": 0.0001,
"loss": 1.5132,
"step": 330
},
{
"epoch": 0.02409809341555036,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4956,
"step": 340
},
{
"epoch": 0.0248068608689489,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4949,
"step": 350
},
{
"epoch": 0.02551562832234744,
"grad_norm": 0.46484375,
"learning_rate": 0.0001,
"loss": 1.5018,
"step": 360
},
{
"epoch": 0.02622439577574598,
"grad_norm": 0.435546875,
"learning_rate": 0.0001,
"loss": 1.4821,
"step": 370
},
{
"epoch": 0.02693316322914452,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.5092,
"step": 380
},
{
"epoch": 0.02764193068254306,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.4998,
"step": 390
},
{
"epoch": 0.0283506981359416,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4958,
"step": 400
},
{
"epoch": 0.02905946558934014,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.5031,
"step": 410
},
{
"epoch": 0.02976823304273868,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4929,
"step": 420
},
{
"epoch": 0.03047700049613722,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4987,
"step": 430
},
{
"epoch": 0.03118576794953576,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4853,
"step": 440
},
{
"epoch": 0.0318945354029343,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4968,
"step": 450
},
{
"epoch": 0.03260330285633284,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.4872,
"step": 460
},
{
"epoch": 0.03331207030973138,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.5061,
"step": 470
},
{
"epoch": 0.03402083776312992,
"grad_norm": 0.6484375,
"learning_rate": 0.0001,
"loss": 1.5078,
"step": 480
},
{
"epoch": 0.03472960521652846,
"grad_norm": 0.47265625,
"learning_rate": 0.0001,
"loss": 1.504,
"step": 490
},
{
"epoch": 0.035438372669927,
"grad_norm": 0.470703125,
"learning_rate": 0.0001,
"loss": 1.5071,
"step": 500
},
{
"epoch": 0.035438372669927,
"eval_accuracy": 0.6646616911746595,
"eval_loss": 1.4113208055496216,
"eval_runtime": 7.1977,
"eval_samples_per_second": 48.21,
"eval_steps_per_second": 0.417,
"step": 500
},
{
"epoch": 0.03614714012332554,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.5028,
"step": 510
},
{
"epoch": 0.03685590757672408,
"grad_norm": 0.6015625,
"learning_rate": 0.0001,
"loss": 1.5042,
"step": 520
},
{
"epoch": 0.03756467503012262,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4969,
"step": 530
},
{
"epoch": 0.03827344248352116,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.5016,
"step": 540
},
{
"epoch": 0.0389822099369197,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.5008,
"step": 550
},
{
"epoch": 0.03969097739031824,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.5126,
"step": 560
},
{
"epoch": 0.04039974484371678,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4949,
"step": 570
},
{
"epoch": 0.04110851229711532,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4912,
"step": 580
},
{
"epoch": 0.04181727975051386,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4912,
"step": 590
},
{
"epoch": 0.0425260472039124,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.4945,
"step": 600
},
{
"epoch": 0.04323481465731094,
"grad_norm": 0.455078125,
"learning_rate": 0.0001,
"loss": 1.4977,
"step": 610
},
{
"epoch": 0.04394358211070948,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4908,
"step": 620
},
{
"epoch": 0.04465234956410802,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.4937,
"step": 630
},
{
"epoch": 0.04536111701750656,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4966,
"step": 640
},
{
"epoch": 0.0460698844709051,
"grad_norm": 0.58203125,
"learning_rate": 0.0001,
"loss": 1.4982,
"step": 650
},
{
"epoch": 0.04677865192430364,
"grad_norm": 0.46484375,
"learning_rate": 0.0001,
"loss": 1.4889,
"step": 660
},
{
"epoch": 0.04748741937770218,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4948,
"step": 670
},
{
"epoch": 0.04819618683110072,
"grad_norm": 0.478515625,
"learning_rate": 0.0001,
"loss": 1.5004,
"step": 680
},
{
"epoch": 0.04890495428449926,
"grad_norm": 0.443359375,
"learning_rate": 0.0001,
"loss": 1.4888,
"step": 690
},
{
"epoch": 0.0496137217378978,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.4981,
"step": 700
},
{
"epoch": 0.05032248919129634,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.504,
"step": 710
},
{
"epoch": 0.05103125664469488,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.4909,
"step": 720
},
{
"epoch": 0.05174002409809342,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.4971,
"step": 730
},
{
"epoch": 0.05244879155149196,
"grad_norm": 0.453125,
"learning_rate": 0.0001,
"loss": 1.4994,
"step": 740
},
{
"epoch": 0.0531575590048905,
"grad_norm": 0.60546875,
"learning_rate": 0.0001,
"loss": 1.4926,
"step": 750
},
{
"epoch": 0.05386632645828904,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4945,
"step": 760
},
{
"epoch": 0.05457509391168758,
"grad_norm": 0.5546875,
"learning_rate": 0.0001,
"loss": 1.5003,
"step": 770
},
{
"epoch": 0.05528386136508612,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4998,
"step": 780
},
{
"epoch": 0.05599262881848466,
"grad_norm": 0.59375,
"learning_rate": 0.0001,
"loss": 1.5005,
"step": 790
},
{
"epoch": 0.0567013962718832,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.509,
"step": 800
},
{
"epoch": 0.05741016372528174,
"grad_norm": 0.4375,
"learning_rate": 0.0001,
"loss": 1.4885,
"step": 810
},
{
"epoch": 0.05811893117868028,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4957,
"step": 820
},
{
"epoch": 0.05882769863207882,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5006,
"step": 830
},
{
"epoch": 0.05953646608547736,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.493,
"step": 840
},
{
"epoch": 0.0602452335388759,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.5008,
"step": 850
},
{
"epoch": 0.06095400099227444,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.4966,
"step": 860
},
{
"epoch": 0.06166276844567298,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.5013,
"step": 870
},
{
"epoch": 0.06237153589907152,
"grad_norm": 0.4609375,
"learning_rate": 0.0001,
"loss": 1.4937,
"step": 880
},
{
"epoch": 0.06308030335247006,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4815,
"step": 890
},
{
"epoch": 0.0637890708058686,
"grad_norm": 0.44921875,
"learning_rate": 0.0001,
"loss": 1.4878,
"step": 900
},
{
"epoch": 0.06449783825926714,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.4948,
"step": 910
},
{
"epoch": 0.06520660571266568,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.5051,
"step": 920
},
{
"epoch": 0.06591537316606422,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4993,
"step": 930
},
{
"epoch": 0.06662414061946276,
"grad_norm": 0.470703125,
"learning_rate": 0.0001,
"loss": 1.4984,
"step": 940
},
{
"epoch": 0.0673329080728613,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.5001,
"step": 950
},
{
"epoch": 0.06804167552625984,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.4899,
"step": 960
},
{
"epoch": 0.06875044297965838,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4881,
"step": 970
},
{
"epoch": 0.06945921043305692,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.5096,
"step": 980
},
{
"epoch": 0.07016797788645546,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.4902,
"step": 990
},
{
"epoch": 0.070876745339854,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.5003,
"step": 1000
},
{
"epoch": 0.070876745339854,
"eval_accuracy": 0.6648911127297294,
"eval_loss": 1.4080075025558472,
"eval_runtime": 7.1527,
"eval_samples_per_second": 48.513,
"eval_steps_per_second": 0.419,
"step": 1000
},
{
"epoch": 0.07158551279325254,
"grad_norm": 0.62109375,
"learning_rate": 0.0001,
"loss": 1.4882,
"step": 1010
},
{
"epoch": 0.07229428024665108,
"grad_norm": 0.478515625,
"learning_rate": 0.0001,
"loss": 1.5019,
"step": 1020
},
{
"epoch": 0.07300304770004962,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.488,
"step": 1030
},
{
"epoch": 0.07371181515344816,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.5065,
"step": 1040
},
{
"epoch": 0.0744205826068467,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4906,
"step": 1050
},
{
"epoch": 0.07512935006024524,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.5036,
"step": 1060
},
{
"epoch": 0.07583811751364378,
"grad_norm": 0.609375,
"learning_rate": 0.0001,
"loss": 1.4968,
"step": 1070
},
{
"epoch": 0.07654688496704232,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.5076,
"step": 1080
},
{
"epoch": 0.07725565242044086,
"grad_norm": 0.466796875,
"learning_rate": 0.0001,
"loss": 1.5008,
"step": 1090
},
{
"epoch": 0.0779644198738394,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.4951,
"step": 1100
},
{
"epoch": 0.07867318732723794,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.4844,
"step": 1110
},
{
"epoch": 0.07938195478063648,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.499,
"step": 1120
},
{
"epoch": 0.08009072223403502,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.4794,
"step": 1130
},
{
"epoch": 0.08079948968743356,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.508,
"step": 1140
},
{
"epoch": 0.0815082571408321,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.4997,
"step": 1150
},
{
"epoch": 0.08221702459423064,
"grad_norm": 0.47265625,
"learning_rate": 0.0001,
"loss": 1.4987,
"step": 1160
},
{
"epoch": 0.08292579204762918,
"grad_norm": 0.578125,
"learning_rate": 0.0001,
"loss": 1.5028,
"step": 1170
},
{
"epoch": 0.08363455950102772,
"grad_norm": 0.453125,
"learning_rate": 0.0001,
"loss": 1.4991,
"step": 1180
},
{
"epoch": 0.08434332695442626,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.5075,
"step": 1190
},
{
"epoch": 0.0850520944078248,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.4845,
"step": 1200
},
{
"epoch": 0.08576086186122334,
"grad_norm": 0.5703125,
"learning_rate": 0.0001,
"loss": 1.4993,
"step": 1210
},
{
"epoch": 0.08646962931462188,
"grad_norm": 0.58984375,
"learning_rate": 0.0001,
"loss": 1.502,
"step": 1220
},
{
"epoch": 0.08717839676802042,
"grad_norm": 0.49609375,
"learning_rate": 0.0001,
"loss": 1.4859,
"step": 1230
},
{
"epoch": 0.08788716422141896,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.5029,
"step": 1240
},
{
"epoch": 0.0885959316748175,
"grad_norm": 0.44921875,
"learning_rate": 0.0001,
"loss": 1.5035,
"step": 1250
},
{
"epoch": 0.08930469912821604,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.5057,
"step": 1260
},
{
"epoch": 0.09001346658161458,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4862,
"step": 1270
},
{
"epoch": 0.09072223403501312,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.4941,
"step": 1280
},
{
"epoch": 0.09143100148841166,
"grad_norm": 0.427734375,
"learning_rate": 0.0001,
"loss": 1.4995,
"step": 1290
},
{
"epoch": 0.0921397689418102,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4891,
"step": 1300
},
{
"epoch": 0.09284853639520874,
"grad_norm": 0.4296875,
"learning_rate": 0.0001,
"loss": 1.5021,
"step": 1310
},
{
"epoch": 0.09355730384860728,
"grad_norm": 0.478515625,
"learning_rate": 0.0001,
"loss": 1.4896,
"step": 1320
},
{
"epoch": 0.09426607130200582,
"grad_norm": 0.58203125,
"learning_rate": 0.0001,
"loss": 1.4889,
"step": 1330
},
{
"epoch": 0.09497483875540436,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4978,
"step": 1340
},
{
"epoch": 0.0956836062088029,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.505,
"step": 1350
},
{
"epoch": 0.09639237366220144,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.4934,
"step": 1360
},
{
"epoch": 0.09710114111559998,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4956,
"step": 1370
},
{
"epoch": 0.09780990856899852,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.4973,
"step": 1380
},
{
"epoch": 0.09851867602239706,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.4906,
"step": 1390
},
{
"epoch": 0.0992274434757956,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4942,
"step": 1400
},
{
"epoch": 0.09993621092919414,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4881,
"step": 1410
},
{
"epoch": 0.10064497838259268,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.4837,
"step": 1420
},
{
"epoch": 0.10135374583599122,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4954,
"step": 1430
},
{
"epoch": 0.10206251328938976,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4838,
"step": 1440
},
{
"epoch": 0.1027712807427883,
"grad_norm": 0.62109375,
"learning_rate": 0.0001,
"loss": 1.4998,
"step": 1450
},
{
"epoch": 0.10348004819618684,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4998,
"step": 1460
},
{
"epoch": 0.10418881564958538,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4876,
"step": 1470
},
{
"epoch": 0.10489758310298392,
"grad_norm": 0.49609375,
"learning_rate": 0.0001,
"loss": 1.4965,
"step": 1480
},
{
"epoch": 0.10560635055638246,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4875,
"step": 1490
},
{
"epoch": 0.106315118009781,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4959,
"step": 1500
},
{
"epoch": 0.106315118009781,
"eval_accuracy": 0.6653851431949415,
"eval_loss": 1.4062947034835815,
"eval_runtime": 7.3356,
"eval_samples_per_second": 47.304,
"eval_steps_per_second": 0.409,
"step": 1500
},
{
"epoch": 0.10702388546317954,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4954,
"step": 1510
},
{
"epoch": 0.10773265291657808,
"grad_norm": 0.59375,
"learning_rate": 0.0001,
"loss": 1.4988,
"step": 1520
},
{
"epoch": 0.10844142036997662,
"grad_norm": 0.447265625,
"learning_rate": 0.0001,
"loss": 1.4895,
"step": 1530
},
{
"epoch": 0.10915018782337516,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.5043,
"step": 1540
},
{
"epoch": 0.1098589552767737,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4746,
"step": 1550
},
{
"epoch": 0.11056772273017224,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4898,
"step": 1560
},
{
"epoch": 0.11127649018357078,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4876,
"step": 1570
},
{
"epoch": 0.11198525763696932,
"grad_norm": 0.6015625,
"learning_rate": 0.0001,
"loss": 1.4966,
"step": 1580
},
{
"epoch": 0.11269402509036786,
"grad_norm": 0.6015625,
"learning_rate": 0.0001,
"loss": 1.5048,
"step": 1590
},
{
"epoch": 0.1134027925437664,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.482,
"step": 1600
},
{
"epoch": 0.11411155999716494,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.4951,
"step": 1610
},
{
"epoch": 0.11482032745056348,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.485,
"step": 1620
},
{
"epoch": 0.11552909490396202,
"grad_norm": 0.455078125,
"learning_rate": 0.0001,
"loss": 1.491,
"step": 1630
},
{
"epoch": 0.11623786235736056,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.4785,
"step": 1640
},
{
"epoch": 0.1169466298107591,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.4971,
"step": 1650
},
{
"epoch": 0.11765539726415764,
"grad_norm": 0.49609375,
"learning_rate": 0.0001,
"loss": 1.5013,
"step": 1660
},
{
"epoch": 0.11836416471755618,
"grad_norm": 0.49609375,
"learning_rate": 0.0001,
"loss": 1.5077,
"step": 1670
},
{
"epoch": 0.11907293217095472,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.4939,
"step": 1680
},
{
"epoch": 0.11978169962435326,
"grad_norm": 0.63671875,
"learning_rate": 0.0001,
"loss": 1.4829,
"step": 1690
},
{
"epoch": 0.1204904670777518,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4964,
"step": 1700
},
{
"epoch": 0.12119923453115033,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4969,
"step": 1710
},
{
"epoch": 0.12190800198454887,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4955,
"step": 1720
},
{
"epoch": 0.12261676943794741,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4913,
"step": 1730
},
{
"epoch": 0.12332553689134595,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4905,
"step": 1740
},
{
"epoch": 0.1240343043447445,
"grad_norm": 0.4609375,
"learning_rate": 0.0001,
"loss": 1.5034,
"step": 1750
},
{
"epoch": 0.12474307179814303,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4991,
"step": 1760
},
{
"epoch": 0.12545183925154157,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.5019,
"step": 1770
},
{
"epoch": 0.12616060670494011,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.4943,
"step": 1780
},
{
"epoch": 0.12686937415833865,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4971,
"step": 1790
},
{
"epoch": 0.1275781416117372,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.4961,
"step": 1800
},
{
"epoch": 0.12828690906513573,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.4888,
"step": 1810
},
{
"epoch": 0.12899567651853427,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4974,
"step": 1820
},
{
"epoch": 0.12970444397193281,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4866,
"step": 1830
},
{
"epoch": 0.13041321142533135,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4878,
"step": 1840
},
{
"epoch": 0.1311219788787299,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4855,
"step": 1850
},
{
"epoch": 0.13183074633212843,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.503,
"step": 1860
},
{
"epoch": 0.13253951378552697,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4955,
"step": 1870
},
{
"epoch": 0.13324828123892551,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4956,
"step": 1880
},
{
"epoch": 0.13395704869232405,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.4922,
"step": 1890
},
{
"epoch": 0.1346658161457226,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.5161,
"step": 1900
},
{
"epoch": 0.13537458359912113,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4909,
"step": 1910
},
{
"epoch": 0.13608335105251967,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4936,
"step": 1920
},
{
"epoch": 0.13679211850591821,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.4882,
"step": 1930
},
{
"epoch": 0.13750088595931675,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.4745,
"step": 1940
},
{
"epoch": 0.1382096534127153,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4964,
"step": 1950
},
{
"epoch": 0.13891842086611383,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.5041,
"step": 1960
},
{
"epoch": 0.13962718831951237,
"grad_norm": 0.62890625,
"learning_rate": 0.0001,
"loss": 1.4916,
"step": 1970
},
{
"epoch": 0.14033595577291091,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4992,
"step": 1980
},
{
"epoch": 0.14104472322630945,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.5001,
"step": 1990
},
{
"epoch": 0.141753490679708,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.5019,
"step": 2000
},
{
"epoch": 0.141753490679708,
"eval_accuracy": 0.6655364488217514,
"eval_loss": 1.4053725004196167,
"eval_runtime": 7.1455,
"eval_samples_per_second": 48.562,
"eval_steps_per_second": 0.42,
"step": 2000
},
{
"epoch": 0.14246225813310653,
"grad_norm": 0.49609375,
"learning_rate": 0.0001,
"loss": 1.5015,
"step": 2010
},
{
"epoch": 0.14317102558650507,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4842,
"step": 2020
},
{
"epoch": 0.14387979303990361,
"grad_norm": 0.466796875,
"learning_rate": 0.0001,
"loss": 1.4868,
"step": 2030
},
{
"epoch": 0.14458856049330215,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4947,
"step": 2040
},
{
"epoch": 0.1452973279467007,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.4964,
"step": 2050
},
{
"epoch": 0.14600609540009923,
"grad_norm": 0.578125,
"learning_rate": 0.0001,
"loss": 1.5029,
"step": 2060
},
{
"epoch": 0.14671486285349777,
"grad_norm": 0.47265625,
"learning_rate": 0.0001,
"loss": 1.4974,
"step": 2070
},
{
"epoch": 0.1474236303068963,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.488,
"step": 2080
},
{
"epoch": 0.14813239776029485,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.5008,
"step": 2090
},
{
"epoch": 0.1488411652136934,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.5018,
"step": 2100
},
{
"epoch": 0.14954993266709193,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.5048,
"step": 2110
},
{
"epoch": 0.15025870012049047,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.5077,
"step": 2120
},
{
"epoch": 0.150967467573889,
"grad_norm": 0.466796875,
"learning_rate": 0.0001,
"loss": 1.4962,
"step": 2130
},
{
"epoch": 0.15167623502728755,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4966,
"step": 2140
},
{
"epoch": 0.1523850024806861,
"grad_norm": 0.6171875,
"learning_rate": 0.0001,
"loss": 1.4926,
"step": 2150
},
{
"epoch": 0.15309376993408463,
"grad_norm": 0.640625,
"learning_rate": 0.0001,
"loss": 1.4895,
"step": 2160
},
{
"epoch": 0.15380253738748317,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4877,
"step": 2170
},
{
"epoch": 0.1545113048408817,
"grad_norm": 0.5546875,
"learning_rate": 0.0001,
"loss": 1.4981,
"step": 2180
},
{
"epoch": 0.15522007229428025,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4947,
"step": 2190
},
{
"epoch": 0.1559288397476788,
"grad_norm": 0.6015625,
"learning_rate": 0.0001,
"loss": 1.4891,
"step": 2200
},
{
"epoch": 0.15663760720107733,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.5004,
"step": 2210
},
{
"epoch": 0.15734637465447587,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.4877,
"step": 2220
},
{
"epoch": 0.1580551421078744,
"grad_norm": 0.455078125,
"learning_rate": 0.0001,
"loss": 1.4966,
"step": 2230
},
{
"epoch": 0.15876390956127295,
"grad_norm": 0.609375,
"learning_rate": 0.0001,
"loss": 1.4934,
"step": 2240
},
{
"epoch": 0.1594726770146715,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.4981,
"step": 2250
},
{
"epoch": 0.16018144446807003,
"grad_norm": 0.6328125,
"learning_rate": 0.0001,
"loss": 1.5087,
"step": 2260
},
{
"epoch": 0.16089021192146857,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.4989,
"step": 2270
},
{
"epoch": 0.1615989793748671,
"grad_norm": 0.609375,
"learning_rate": 0.0001,
"loss": 1.4785,
"step": 2280
},
{
"epoch": 0.16230774682826565,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.4978,
"step": 2290
},
{
"epoch": 0.1630165142816642,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.4931,
"step": 2300
},
{
"epoch": 0.16372528173506273,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.5119,
"step": 2310
},
{
"epoch": 0.16443404918846127,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5097,
"step": 2320
},
{
"epoch": 0.1651428166418598,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.481,
"step": 2330
},
{
"epoch": 0.16585158409525835,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.4884,
"step": 2340
},
{
"epoch": 0.1665603515486569,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4934,
"step": 2350
},
{
"epoch": 0.16726911900205543,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4789,
"step": 2360
},
{
"epoch": 0.16797788645545397,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.5063,
"step": 2370
},
{
"epoch": 0.1686866539088525,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.5095,
"step": 2380
},
{
"epoch": 0.16939542136225105,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4949,
"step": 2390
},
{
"epoch": 0.1701041888156496,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.4918,
"step": 2400
},
{
"epoch": 0.17081295626904813,
"grad_norm": 0.455078125,
"learning_rate": 0.0001,
"loss": 1.5029,
"step": 2410
},
{
"epoch": 0.17152172372244667,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.4945,
"step": 2420
},
{
"epoch": 0.1722304911758452,
"grad_norm": 0.6171875,
"learning_rate": 0.0001,
"loss": 1.4933,
"step": 2430
},
{
"epoch": 0.17293925862924375,
"grad_norm": 0.61328125,
"learning_rate": 0.0001,
"loss": 1.4949,
"step": 2440
},
{
"epoch": 0.1736480260826423,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.5043,
"step": 2450
},
{
"epoch": 0.17435679353604083,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.4971,
"step": 2460
},
{
"epoch": 0.17506556098943937,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.49,
"step": 2470
},
{
"epoch": 0.1757743284428379,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4892,
"step": 2480
},
{
"epoch": 0.17648309589623645,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.5006,
"step": 2490
},
{
"epoch": 0.177191863349635,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4891,
"step": 2500
},
{
"epoch": 0.177191863349635,
"eval_accuracy": 0.6656455296224749,
"eval_loss": 1.4047455787658691,
"eval_runtime": 7.1711,
"eval_samples_per_second": 48.388,
"eval_steps_per_second": 0.418,
"step": 2500
},
{
"epoch": 0.17790063080303353,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.4883,
"step": 2510
},
{
"epoch": 0.17860939825643207,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4906,
"step": 2520
},
{
"epoch": 0.1793181657098306,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4946,
"step": 2530
},
{
"epoch": 0.18002693316322915,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.4933,
"step": 2540
},
{
"epoch": 0.1807357006166277,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.4794,
"step": 2550
},
{
"epoch": 0.18144446807002623,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.5019,
"step": 2560
},
{
"epoch": 0.18215323552342477,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4956,
"step": 2570
},
{
"epoch": 0.1828620029768233,
"grad_norm": 0.4609375,
"learning_rate": 0.0001,
"loss": 1.5043,
"step": 2580
},
{
"epoch": 0.18357077043022185,
"grad_norm": 0.455078125,
"learning_rate": 0.0001,
"loss": 1.4928,
"step": 2590
},
{
"epoch": 0.1842795378836204,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.5021,
"step": 2600
},
{
"epoch": 0.18498830533701893,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.5033,
"step": 2610
},
{
"epoch": 0.18569707279041747,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4875,
"step": 2620
},
{
"epoch": 0.186405840243816,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.4913,
"step": 2630
},
{
"epoch": 0.18711460769721455,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4929,
"step": 2640
},
{
"epoch": 0.1878233751506131,
"grad_norm": 0.4609375,
"learning_rate": 0.0001,
"loss": 1.4948,
"step": 2650
},
{
"epoch": 0.18853214260401163,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.5015,
"step": 2660
},
{
"epoch": 0.18924091005741017,
"grad_norm": 0.470703125,
"learning_rate": 0.0001,
"loss": 1.4996,
"step": 2670
},
{
"epoch": 0.1899496775108087,
"grad_norm": 0.431640625,
"learning_rate": 0.0001,
"loss": 1.487,
"step": 2680
},
{
"epoch": 0.19065844496420725,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4944,
"step": 2690
},
{
"epoch": 0.1913672124176058,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4889,
"step": 2700
},
{
"epoch": 0.19207597987100433,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4996,
"step": 2710
},
{
"epoch": 0.19278474732440287,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4908,
"step": 2720
},
{
"epoch": 0.1934935147778014,
"grad_norm": 0.4453125,
"learning_rate": 0.0001,
"loss": 1.4877,
"step": 2730
},
{
"epoch": 0.19420228223119995,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.4824,
"step": 2740
},
{
"epoch": 0.1949110496845985,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4925,
"step": 2750
},
{
"epoch": 0.19561981713799703,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4895,
"step": 2760
},
{
"epoch": 0.19632858459139557,
"grad_norm": 0.47265625,
"learning_rate": 0.0001,
"loss": 1.489,
"step": 2770
},
{
"epoch": 0.1970373520447941,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.4968,
"step": 2780
},
{
"epoch": 0.19774611949819265,
"grad_norm": 0.455078125,
"learning_rate": 0.0001,
"loss": 1.4985,
"step": 2790
},
{
"epoch": 0.1984548869515912,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.482,
"step": 2800
},
{
"epoch": 0.19916365440498973,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.5019,
"step": 2810
},
{
"epoch": 0.19987242185838827,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4914,
"step": 2820
},
{
"epoch": 0.2005811893117868,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4989,
"step": 2830
},
{
"epoch": 0.20128995676518535,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.4927,
"step": 2840
},
{
"epoch": 0.2019987242185839,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.496,
"step": 2850
},
{
"epoch": 0.20270749167198243,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4828,
"step": 2860
},
{
"epoch": 0.20341625912538097,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4945,
"step": 2870
},
{
"epoch": 0.2041250265787795,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4906,
"step": 2880
},
{
"epoch": 0.20483379403217805,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.5005,
"step": 2890
},
{
"epoch": 0.2055425614855766,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.5009,
"step": 2900
},
{
"epoch": 0.20625132893897513,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4862,
"step": 2910
},
{
"epoch": 0.20696009639237367,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4833,
"step": 2920
},
{
"epoch": 0.2076688638457722,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4975,
"step": 2930
},
{
"epoch": 0.20837763129917075,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4992,
"step": 2940
},
{
"epoch": 0.2090863987525693,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.4878,
"step": 2950
},
{
"epoch": 0.20979516620596783,
"grad_norm": 0.478515625,
"learning_rate": 0.0001,
"loss": 1.4978,
"step": 2960
},
{
"epoch": 0.21050393365936637,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.484,
"step": 2970
},
{
"epoch": 0.2112127011127649,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.5065,
"step": 2980
},
{
"epoch": 0.21192146856616345,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4829,
"step": 2990
},
{
"epoch": 0.212630236019562,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4916,
"step": 3000
},
{
"epoch": 0.212630236019562,
"eval_accuracy": 0.6657292755275465,
"eval_loss": 1.4040066003799438,
"eval_runtime": 7.1205,
"eval_samples_per_second": 48.733,
"eval_steps_per_second": 0.421,
"step": 3000
},
{
"epoch": 0.21333900347296053,
"grad_norm": 0.44140625,
"learning_rate": 0.0001,
"loss": 1.4859,
"step": 3010
},
{
"epoch": 0.21404777092635907,
"grad_norm": 0.54296875,
"learning_rate": 0.0001,
"loss": 1.4907,
"step": 3020
},
{
"epoch": 0.2147565383797576,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4771,
"step": 3030
},
{
"epoch": 0.21546530583315615,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4868,
"step": 3040
},
{
"epoch": 0.2161740732865547,
"grad_norm": 0.458984375,
"learning_rate": 0.0001,
"loss": 1.4941,
"step": 3050
},
{
"epoch": 0.21688284073995323,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4951,
"step": 3060
},
{
"epoch": 0.21759160819335177,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.4915,
"step": 3070
},
{
"epoch": 0.2183003756467503,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4869,
"step": 3080
},
{
"epoch": 0.21900914310014885,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.5057,
"step": 3090
},
{
"epoch": 0.2197179105535474,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4941,
"step": 3100
},
{
"epoch": 0.22042667800694593,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.5017,
"step": 3110
},
{
"epoch": 0.22113544546034447,
"grad_norm": 0.447265625,
"learning_rate": 0.0001,
"loss": 1.511,
"step": 3120
},
{
"epoch": 0.221844212913743,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4903,
"step": 3130
},
{
"epoch": 0.22255298036714155,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.5098,
"step": 3140
},
{
"epoch": 0.2232617478205401,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4989,
"step": 3150
},
{
"epoch": 0.22397051527393863,
"grad_norm": 0.478515625,
"learning_rate": 0.0001,
"loss": 1.4935,
"step": 3160
},
{
"epoch": 0.22467928272733717,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4907,
"step": 3170
},
{
"epoch": 0.2253880501807357,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.5033,
"step": 3180
},
{
"epoch": 0.22609681763413425,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4982,
"step": 3190
},
{
"epoch": 0.2268055850875328,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4853,
"step": 3200
},
{
"epoch": 0.22751435254093133,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.498,
"step": 3210
},
{
"epoch": 0.22822311999432987,
"grad_norm": 0.6171875,
"learning_rate": 0.0001,
"loss": 1.489,
"step": 3220
},
{
"epoch": 0.2289318874477284,
"grad_norm": 0.435546875,
"learning_rate": 0.0001,
"loss": 1.4758,
"step": 3230
},
{
"epoch": 0.22964065490112695,
"grad_norm": 0.69921875,
"learning_rate": 0.0001,
"loss": 1.4896,
"step": 3240
},
{
"epoch": 0.2303494223545255,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.479,
"step": 3250
},
{
"epoch": 0.23105818980792403,
"grad_norm": 0.478515625,
"learning_rate": 0.0001,
"loss": 1.4968,
"step": 3260
},
{
"epoch": 0.23176695726132257,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4913,
"step": 3270
},
{
"epoch": 0.2324757247147211,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.4856,
"step": 3280
},
{
"epoch": 0.23318449216811965,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.4969,
"step": 3290
},
{
"epoch": 0.2338932596215182,
"grad_norm": 0.46484375,
"learning_rate": 0.0001,
"loss": 1.4968,
"step": 3300
},
{
"epoch": 0.23460202707491673,
"grad_norm": 0.4453125,
"learning_rate": 0.0001,
"loss": 1.4904,
"step": 3310
},
{
"epoch": 0.23531079452831527,
"grad_norm": 0.5546875,
"learning_rate": 0.0001,
"loss": 1.484,
"step": 3320
},
{
"epoch": 0.2360195619817138,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4928,
"step": 3330
},
{
"epoch": 0.23672832943511235,
"grad_norm": 0.54296875,
"learning_rate": 0.0001,
"loss": 1.4964,
"step": 3340
},
{
"epoch": 0.2374370968885109,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4974,
"step": 3350
},
{
"epoch": 0.23814586434190943,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.503,
"step": 3360
},
{
"epoch": 0.23885463179530797,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.5029,
"step": 3370
},
{
"epoch": 0.2395633992487065,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.501,
"step": 3380
},
{
"epoch": 0.24027216670210505,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4885,
"step": 3390
},
{
"epoch": 0.2409809341555036,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4904,
"step": 3400
},
{
"epoch": 0.24168970160890213,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.4851,
"step": 3410
},
{
"epoch": 0.24239846906230067,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4783,
"step": 3420
},
{
"epoch": 0.2431072365156992,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.4948,
"step": 3430
},
{
"epoch": 0.24381600396909775,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4754,
"step": 3440
},
{
"epoch": 0.2445247714224963,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4936,
"step": 3450
},
{
"epoch": 0.24523353887589483,
"grad_norm": 0.447265625,
"learning_rate": 0.0001,
"loss": 1.4814,
"step": 3460
},
{
"epoch": 0.24594230632929337,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.505,
"step": 3470
},
{
"epoch": 0.2466510737826919,
"grad_norm": 0.609375,
"learning_rate": 0.0001,
"loss": 1.4888,
"step": 3480
},
{
"epoch": 0.24735984123609045,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4974,
"step": 3490
},
{
"epoch": 0.248068608689489,
"grad_norm": 0.4453125,
"learning_rate": 0.0001,
"loss": 1.496,
"step": 3500
},
{
"epoch": 0.248068608689489,
"eval_accuracy": 0.665728571780445,
"eval_loss": 1.403409481048584,
"eval_runtime": 7.1308,
"eval_samples_per_second": 48.662,
"eval_steps_per_second": 0.421,
"step": 3500
},
{
"epoch": 0.24877737614288753,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.4988,
"step": 3510
},
{
"epoch": 0.24948614359628607,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.5036,
"step": 3520
},
{
"epoch": 0.2501949110496846,
"grad_norm": 0.609375,
"learning_rate": 0.0001,
"loss": 1.5038,
"step": 3530
},
{
"epoch": 0.25090367850308315,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.4947,
"step": 3540
},
{
"epoch": 0.2516124459564817,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4914,
"step": 3550
},
{
"epoch": 0.25232121340988023,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4855,
"step": 3560
},
{
"epoch": 0.25302998086327877,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.4845,
"step": 3570
},
{
"epoch": 0.2537387483166773,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.5041,
"step": 3580
},
{
"epoch": 0.25444751577007585,
"grad_norm": 0.5703125,
"learning_rate": 0.0001,
"loss": 1.4803,
"step": 3590
},
{
"epoch": 0.2551562832234744,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4938,
"step": 3600
},
{
"epoch": 0.25586505067687293,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.4938,
"step": 3610
},
{
"epoch": 0.25657381813027147,
"grad_norm": 0.5859375,
"learning_rate": 0.0001,
"loss": 1.4972,
"step": 3620
},
{
"epoch": 0.25728258558367,
"grad_norm": 0.458984375,
"learning_rate": 0.0001,
"loss": 1.4941,
"step": 3630
},
{
"epoch": 0.25799135303706855,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.4881,
"step": 3640
},
{
"epoch": 0.2587001204904671,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4906,
"step": 3650
},
{
"epoch": 0.25940888794386563,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4937,
"step": 3660
},
{
"epoch": 0.26011765539726417,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.4904,
"step": 3670
},
{
"epoch": 0.2608264228506627,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5034,
"step": 3680
},
{
"epoch": 0.26153519030406125,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4727,
"step": 3690
},
{
"epoch": 0.2622439577574598,
"grad_norm": 0.421875,
"learning_rate": 0.0001,
"loss": 1.4919,
"step": 3700
},
{
"epoch": 0.26295272521085833,
"grad_norm": 0.43359375,
"learning_rate": 0.0001,
"loss": 1.4842,
"step": 3710
},
{
"epoch": 0.26366149266425687,
"grad_norm": 0.6015625,
"learning_rate": 0.0001,
"loss": 1.4953,
"step": 3720
},
{
"epoch": 0.2643702601176554,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.4739,
"step": 3730
},
{
"epoch": 0.26507902757105395,
"grad_norm": 0.447265625,
"learning_rate": 0.0001,
"loss": 1.4913,
"step": 3740
},
{
"epoch": 0.2657877950244525,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.5003,
"step": 3750
},
{
"epoch": 0.26649656247785103,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.4919,
"step": 3760
},
{
"epoch": 0.26720532993124957,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4818,
"step": 3770
},
{
"epoch": 0.2679140973846481,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.5003,
"step": 3780
},
{
"epoch": 0.26862286483804665,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4934,
"step": 3790
},
{
"epoch": 0.2693316322914452,
"grad_norm": 0.59375,
"learning_rate": 0.0001,
"loss": 1.4869,
"step": 3800
},
{
"epoch": 0.27004039974484373,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.4896,
"step": 3810
},
{
"epoch": 0.27074916719824227,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.5021,
"step": 3820
},
{
"epoch": 0.2714579346516408,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.5048,
"step": 3830
},
{
"epoch": 0.27216670210503935,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4963,
"step": 3840
},
{
"epoch": 0.2728754695584379,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4967,
"step": 3850
},
{
"epoch": 0.27358423701183643,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.5036,
"step": 3860
},
{
"epoch": 0.27429300446523497,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4944,
"step": 3870
},
{
"epoch": 0.2750017719186335,
"grad_norm": 0.47265625,
"learning_rate": 0.0001,
"loss": 1.4951,
"step": 3880
},
{
"epoch": 0.27571053937203205,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.5006,
"step": 3890
},
{
"epoch": 0.2764193068254306,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.4886,
"step": 3900
},
{
"epoch": 0.27712807427882913,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4931,
"step": 3910
},
{
"epoch": 0.27783684173222767,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.4926,
"step": 3920
},
{
"epoch": 0.2785456091856262,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4962,
"step": 3930
},
{
"epoch": 0.27925437663902475,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.4933,
"step": 3940
},
{
"epoch": 0.2799631440924233,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.4959,
"step": 3950
},
{
"epoch": 0.28067191154582183,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.49,
"step": 3960
},
{
"epoch": 0.28138067899922037,
"grad_norm": 0.5859375,
"learning_rate": 0.0001,
"loss": 1.5087,
"step": 3970
},
{
"epoch": 0.2820894464526189,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4939,
"step": 3980
},
{
"epoch": 0.28279821390601745,
"grad_norm": 0.4375,
"learning_rate": 0.0001,
"loss": 1.4913,
"step": 3990
},
{
"epoch": 0.283506981359416,
"grad_norm": 0.443359375,
"learning_rate": 0.0001,
"loss": 1.495,
"step": 4000
},
{
"epoch": 0.283506981359416,
"eval_accuracy": 0.6657313867688508,
"eval_loss": 1.403212308883667,
"eval_runtime": 7.176,
"eval_samples_per_second": 48.355,
"eval_steps_per_second": 0.418,
"step": 4000
},
{
"epoch": 0.28421574881281453,
"grad_norm": 0.46484375,
"learning_rate": 0.0001,
"loss": 1.4793,
"step": 4010
},
{
"epoch": 0.28492451626621307,
"grad_norm": 0.5859375,
"learning_rate": 0.0001,
"loss": 1.484,
"step": 4020
},
{
"epoch": 0.2856332837196116,
"grad_norm": 0.578125,
"learning_rate": 0.0001,
"loss": 1.4967,
"step": 4030
},
{
"epoch": 0.28634205117301015,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.4952,
"step": 4040
},
{
"epoch": 0.2870508186264087,
"grad_norm": 0.5859375,
"learning_rate": 0.0001,
"loss": 1.494,
"step": 4050
},
{
"epoch": 0.28775958607980723,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.4888,
"step": 4060
},
{
"epoch": 0.28846835353320577,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4784,
"step": 4070
},
{
"epoch": 0.2891771209866043,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.4924,
"step": 4080
},
{
"epoch": 0.28988588844000285,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.5047,
"step": 4090
},
{
"epoch": 0.2905946558934014,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4895,
"step": 4100
},
{
"epoch": 0.29130342334679993,
"grad_norm": 0.466796875,
"learning_rate": 0.0001,
"loss": 1.4852,
"step": 4110
},
{
"epoch": 0.29201219080019847,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.5075,
"step": 4120
},
{
"epoch": 0.292720958253597,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.5061,
"step": 4130
},
{
"epoch": 0.29342972570699555,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4953,
"step": 4140
},
{
"epoch": 0.2941384931603941,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4956,
"step": 4150
},
{
"epoch": 0.2948472606137926,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4978,
"step": 4160
},
{
"epoch": 0.29555602806719117,
"grad_norm": 0.7421875,
"learning_rate": 0.0001,
"loss": 1.4759,
"step": 4170
},
{
"epoch": 0.2962647955205897,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4955,
"step": 4180
},
{
"epoch": 0.29697356297398825,
"grad_norm": 0.447265625,
"learning_rate": 0.0001,
"loss": 1.4955,
"step": 4190
},
{
"epoch": 0.2976823304273868,
"grad_norm": 0.458984375,
"learning_rate": 0.0001,
"loss": 1.482,
"step": 4200
},
{
"epoch": 0.2983910978807853,
"grad_norm": 0.455078125,
"learning_rate": 0.0001,
"loss": 1.4774,
"step": 4210
},
{
"epoch": 0.29909986533418387,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.484,
"step": 4220
},
{
"epoch": 0.2998086327875824,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.5073,
"step": 4230
},
{
"epoch": 0.30051740024098095,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.5047,
"step": 4240
},
{
"epoch": 0.3012261676943795,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4898,
"step": 4250
},
{
"epoch": 0.301934935147778,
"grad_norm": 0.4453125,
"learning_rate": 0.0001,
"loss": 1.4914,
"step": 4260
},
{
"epoch": 0.30264370260117657,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4838,
"step": 4270
},
{
"epoch": 0.3033524700545751,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4908,
"step": 4280
},
{
"epoch": 0.30406123750797365,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.4838,
"step": 4290
},
{
"epoch": 0.3047700049613722,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4871,
"step": 4300
},
{
"epoch": 0.3054787724147707,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.5091,
"step": 4310
},
{
"epoch": 0.30618753986816927,
"grad_norm": 0.4296875,
"learning_rate": 0.0001,
"loss": 1.4968,
"step": 4320
},
{
"epoch": 0.3068963073215678,
"grad_norm": 0.5703125,
"learning_rate": 0.0001,
"loss": 1.4926,
"step": 4330
},
{
"epoch": 0.30760507477496635,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.4982,
"step": 4340
},
{
"epoch": 0.3083138422283649,
"grad_norm": 0.5546875,
"learning_rate": 0.0001,
"loss": 1.5001,
"step": 4350
},
{
"epoch": 0.3090226096817634,
"grad_norm": 0.43359375,
"learning_rate": 0.0001,
"loss": 1.4939,
"step": 4360
},
{
"epoch": 0.30973137713516197,
"grad_norm": 0.49609375,
"learning_rate": 0.0001,
"loss": 1.4979,
"step": 4370
},
{
"epoch": 0.3104401445885605,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4799,
"step": 4380
},
{
"epoch": 0.31114891204195905,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4945,
"step": 4390
},
{
"epoch": 0.3118576794953576,
"grad_norm": 0.458984375,
"learning_rate": 0.0001,
"loss": 1.4948,
"step": 4400
},
{
"epoch": 0.3125664469487561,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4942,
"step": 4410
},
{
"epoch": 0.31327521440215467,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.5039,
"step": 4420
},
{
"epoch": 0.3139839818555532,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4882,
"step": 4430
},
{
"epoch": 0.31469274930895175,
"grad_norm": 0.4296875,
"learning_rate": 0.0001,
"loss": 1.4925,
"step": 4440
},
{
"epoch": 0.3154015167623503,
"grad_norm": 0.65625,
"learning_rate": 0.0001,
"loss": 1.5131,
"step": 4450
},
{
"epoch": 0.3161102842157488,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.5026,
"step": 4460
},
{
"epoch": 0.31681905166914737,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4959,
"step": 4470
},
{
"epoch": 0.3175278191225459,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.4972,
"step": 4480
},
{
"epoch": 0.31823658657594445,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4922,
"step": 4490
},
{
"epoch": 0.318945354029343,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.4934,
"step": 4500
},
{
"epoch": 0.318945354029343,
"eval_accuracy": 0.6658446900521828,
"eval_loss": 1.4029836654663086,
"eval_runtime": 7.1758,
"eval_samples_per_second": 48.357,
"eval_steps_per_second": 0.418,
"step": 4500
},
{
"epoch": 0.3196541214827415,
"grad_norm": 0.57421875,
"learning_rate": 0.0001,
"loss": 1.4693,
"step": 4510
},
{
"epoch": 0.32036288893614007,
"grad_norm": 0.486328125,
"learning_rate": 0.0001,
"loss": 1.498,
"step": 4520
},
{
"epoch": 0.3210716563895386,
"grad_norm": 0.462890625,
"learning_rate": 0.0001,
"loss": 1.4872,
"step": 4530
},
{
"epoch": 0.32178042384293715,
"grad_norm": 0.5546875,
"learning_rate": 0.0001,
"loss": 1.4866,
"step": 4540
},
{
"epoch": 0.3224891912963357,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4919,
"step": 4550
},
{
"epoch": 0.3231979587497342,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.4765,
"step": 4560
},
{
"epoch": 0.32390672620313277,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.4919,
"step": 4570
},
{
"epoch": 0.3246154936565313,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.5069,
"step": 4580
},
{
"epoch": 0.32532426110992985,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.5014,
"step": 4590
},
{
"epoch": 0.3260330285633284,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.5081,
"step": 4600
},
{
"epoch": 0.3267417960167269,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.5007,
"step": 4610
},
{
"epoch": 0.32745056347012547,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.4755,
"step": 4620
},
{
"epoch": 0.328159330923524,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.4875,
"step": 4630
},
{
"epoch": 0.32886809837692255,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4871,
"step": 4640
},
{
"epoch": 0.3295768658303211,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.5034,
"step": 4650
},
{
"epoch": 0.3302856332837196,
"grad_norm": 0.470703125,
"learning_rate": 0.0001,
"loss": 1.4828,
"step": 4660
},
{
"epoch": 0.33099440073711817,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.489,
"step": 4670
},
{
"epoch": 0.3317031681905167,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.5142,
"step": 4680
},
{
"epoch": 0.33241193564391525,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4918,
"step": 4690
},
{
"epoch": 0.3331207030973138,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4862,
"step": 4700
},
{
"epoch": 0.3338294705507123,
"grad_norm": 0.61328125,
"learning_rate": 0.0001,
"loss": 1.4979,
"step": 4710
},
{
"epoch": 0.33453823800411087,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4968,
"step": 4720
},
{
"epoch": 0.3352470054575094,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.4929,
"step": 4730
},
{
"epoch": 0.33595577291090795,
"grad_norm": 0.54296875,
"learning_rate": 0.0001,
"loss": 1.4713,
"step": 4740
},
{
"epoch": 0.3366645403643065,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4866,
"step": 4750
},
{
"epoch": 0.337373307817705,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4993,
"step": 4760
},
{
"epoch": 0.33808207527110357,
"grad_norm": 0.58203125,
"learning_rate": 0.0001,
"loss": 1.4966,
"step": 4770
},
{
"epoch": 0.3387908427245021,
"grad_norm": 0.46484375,
"learning_rate": 0.0001,
"loss": 1.5022,
"step": 4780
},
{
"epoch": 0.33949961017790065,
"grad_norm": 0.453125,
"learning_rate": 0.0001,
"loss": 1.4908,
"step": 4790
},
{
"epoch": 0.3402083776312992,
"grad_norm": 0.66015625,
"learning_rate": 0.0001,
"loss": 1.4884,
"step": 4800
},
{
"epoch": 0.3409171450846977,
"grad_norm": 0.44140625,
"learning_rate": 0.0001,
"loss": 1.5059,
"step": 4810
},
{
"epoch": 0.34162591253809627,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.4907,
"step": 4820
},
{
"epoch": 0.3423346799914948,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4842,
"step": 4830
},
{
"epoch": 0.34304344744489335,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4984,
"step": 4840
},
{
"epoch": 0.3437522148982919,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4828,
"step": 4850
},
{
"epoch": 0.3444609823516904,
"grad_norm": 0.5859375,
"learning_rate": 0.0001,
"loss": 1.4865,
"step": 4860
},
{
"epoch": 0.34516974980508897,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.5001,
"step": 4870
},
{
"epoch": 0.3458785172584875,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.5026,
"step": 4880
},
{
"epoch": 0.34658728471188605,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.488,
"step": 4890
},
{
"epoch": 0.3472960521652846,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4846,
"step": 4900
},
{
"epoch": 0.3480048196186831,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.4972,
"step": 4910
},
{
"epoch": 0.34871358707208167,
"grad_norm": 0.46484375,
"learning_rate": 0.0001,
"loss": 1.487,
"step": 4920
},
{
"epoch": 0.3494223545254802,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4945,
"step": 4930
},
{
"epoch": 0.35013112197887875,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4942,
"step": 4940
},
{
"epoch": 0.3508398894322773,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4944,
"step": 4950
},
{
"epoch": 0.3515486568856758,
"grad_norm": 0.59375,
"learning_rate": 0.0001,
"loss": 1.5099,
"step": 4960
},
{
"epoch": 0.35225742433907437,
"grad_norm": 0.494140625,
"learning_rate": 0.0001,
"loss": 1.4951,
"step": 4970
},
{
"epoch": 0.3529661917924729,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.5049,
"step": 4980
},
{
"epoch": 0.35367495924587145,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.4764,
"step": 4990
},
{
"epoch": 0.35438372669927,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4849,
"step": 5000
},
{
"epoch": 0.35438372669927,
"eval_accuracy": 0.6660431467347894,
"eval_loss": 1.4028522968292236,
"eval_runtime": 7.1426,
"eval_samples_per_second": 48.581,
"eval_steps_per_second": 0.42,
"step": 5000
},
{
"epoch": 0.3550924941526685,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.4884,
"step": 5010
},
{
"epoch": 0.35580126160606707,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.5002,
"step": 5020
},
{
"epoch": 0.3565100290594656,
"grad_norm": 0.498046875,
"learning_rate": 0.0001,
"loss": 1.5019,
"step": 5030
},
{
"epoch": 0.35721879651286415,
"grad_norm": 0.44921875,
"learning_rate": 0.0001,
"loss": 1.4813,
"step": 5040
},
{
"epoch": 0.3579275639662627,
"grad_norm": 0.439453125,
"learning_rate": 0.0001,
"loss": 1.4826,
"step": 5050
},
{
"epoch": 0.3586363314196612,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.5028,
"step": 5060
},
{
"epoch": 0.35934509887305977,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4953,
"step": 5070
},
{
"epoch": 0.3600538663264583,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.4826,
"step": 5080
},
{
"epoch": 0.36076263377985684,
"grad_norm": 0.458984375,
"learning_rate": 0.0001,
"loss": 1.4877,
"step": 5090
},
{
"epoch": 0.3614714012332554,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.4823,
"step": 5100
},
{
"epoch": 0.3621801686866539,
"grad_norm": 0.55859375,
"learning_rate": 0.0001,
"loss": 1.4957,
"step": 5110
},
{
"epoch": 0.36288893614005246,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 1.4935,
"step": 5120
},
{
"epoch": 0.363597703593451,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.5076,
"step": 5130
},
{
"epoch": 0.36430647104684954,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4848,
"step": 5140
},
{
"epoch": 0.3650152385002481,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.4892,
"step": 5150
},
{
"epoch": 0.3657240059536466,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 1.4944,
"step": 5160
},
{
"epoch": 0.36643277340704516,
"grad_norm": 0.609375,
"learning_rate": 0.0001,
"loss": 1.4946,
"step": 5170
},
{
"epoch": 0.3671415408604437,
"grad_norm": 0.474609375,
"learning_rate": 0.0001,
"loss": 1.4826,
"step": 5180
},
{
"epoch": 0.36785030831384224,
"grad_norm": 0.470703125,
"learning_rate": 0.0001,
"loss": 1.4874,
"step": 5190
},
{
"epoch": 0.3685590757672408,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5055,
"step": 5200
},
{
"epoch": 0.3692678432206393,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.5005,
"step": 5210
},
{
"epoch": 0.36997661067403786,
"grad_norm": 0.46875,
"learning_rate": 0.0001,
"loss": 1.4842,
"step": 5220
},
{
"epoch": 0.3706853781274364,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.501,
"step": 5230
},
{
"epoch": 0.37139414558083494,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.5031,
"step": 5240
},
{
"epoch": 0.3721029130342335,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 1.4887,
"step": 5250
},
{
"epoch": 0.372811680487632,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4779,
"step": 5260
},
{
"epoch": 0.37352044794103056,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.4779,
"step": 5270
},
{
"epoch": 0.3742292153944291,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.5006,
"step": 5280
},
{
"epoch": 0.37493798284782764,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5109,
"step": 5290
},
{
"epoch": 0.3756467503012262,
"grad_norm": 0.451171875,
"learning_rate": 0.0001,
"loss": 1.4779,
"step": 5300
},
{
"epoch": 0.3763555177546247,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4861,
"step": 5310
},
{
"epoch": 0.37706428520802326,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5001,
"step": 5320
},
{
"epoch": 0.3777730526614218,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4876,
"step": 5330
},
{
"epoch": 0.37848182011482034,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.486,
"step": 5340
},
{
"epoch": 0.3791905875682189,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.4956,
"step": 5350
},
{
"epoch": 0.3798993550216174,
"grad_norm": 0.61328125,
"learning_rate": 0.0001,
"loss": 1.4972,
"step": 5360
},
{
"epoch": 0.38060812247501596,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4926,
"step": 5370
},
{
"epoch": 0.3813168899284145,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4982,
"step": 5380
},
{
"epoch": 0.38202565738181304,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4949,
"step": 5390
},
{
"epoch": 0.3827344248352116,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.4928,
"step": 5400
},
{
"epoch": 0.3834431922886101,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4841,
"step": 5410
},
{
"epoch": 0.38415195974200866,
"grad_norm": 0.58984375,
"learning_rate": 0.0001,
"loss": 1.4975,
"step": 5420
},
{
"epoch": 0.3848607271954072,
"grad_norm": 0.63671875,
"learning_rate": 0.0001,
"loss": 1.4907,
"step": 5430
},
{
"epoch": 0.38556949464880574,
"grad_norm": 0.453125,
"learning_rate": 0.0001,
"loss": 1.4933,
"step": 5440
},
{
"epoch": 0.3862782621022043,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.5083,
"step": 5450
},
{
"epoch": 0.3869870295556028,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4766,
"step": 5460
},
{
"epoch": 0.38769579700900136,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.485,
"step": 5470
},
{
"epoch": 0.3884045644623999,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.4891,
"step": 5480
},
{
"epoch": 0.38911333191579844,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.495,
"step": 5490
},
{
"epoch": 0.389822099369197,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4833,
"step": 5500
},
{
"epoch": 0.389822099369197,
"eval_accuracy": 0.6661451900644985,
"eval_loss": 1.4023902416229248,
"eval_runtime": 7.2106,
"eval_samples_per_second": 48.124,
"eval_steps_per_second": 0.416,
"step": 5500
},
{
"epoch": 0.3905308668225955,
"grad_norm": 0.466796875,
"learning_rate": 0.0001,
"loss": 1.5,
"step": 5510
},
{
"epoch": 0.39123963427599406,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.4789,
"step": 5520
},
{
"epoch": 0.3919484017293926,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.498,
"step": 5530
},
{
"epoch": 0.39265716918279114,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.5051,
"step": 5540
},
{
"epoch": 0.3933659366361897,
"grad_norm": 0.66015625,
"learning_rate": 0.0001,
"loss": 1.5041,
"step": 5550
},
{
"epoch": 0.3940747040895882,
"grad_norm": 0.458984375,
"learning_rate": 0.0001,
"loss": 1.4736,
"step": 5560
},
{
"epoch": 0.39478347154298676,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4863,
"step": 5570
},
{
"epoch": 0.3954922389963853,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4962,
"step": 5580
},
{
"epoch": 0.39620100644978384,
"grad_norm": 0.44921875,
"learning_rate": 0.0001,
"loss": 1.4904,
"step": 5590
},
{
"epoch": 0.3969097739031824,
"grad_norm": 0.54296875,
"learning_rate": 0.0001,
"loss": 1.4955,
"step": 5600
},
{
"epoch": 0.3976185413565809,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4925,
"step": 5610
},
{
"epoch": 0.39832730880997946,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.4945,
"step": 5620
},
{
"epoch": 0.399036076263378,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.5,
"step": 5630
},
{
"epoch": 0.39974484371677654,
"grad_norm": 0.6015625,
"learning_rate": 0.0001,
"loss": 1.4946,
"step": 5640
},
{
"epoch": 0.4004536111701751,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4868,
"step": 5650
},
{
"epoch": 0.4011623786235736,
"grad_norm": 0.52734375,
"learning_rate": 0.0001,
"loss": 1.4841,
"step": 5660
},
{
"epoch": 0.40187114607697216,
"grad_norm": 0.62109375,
"learning_rate": 0.0001,
"loss": 1.494,
"step": 5670
},
{
"epoch": 0.4025799135303707,
"grad_norm": 0.51171875,
"learning_rate": 0.0001,
"loss": 1.4844,
"step": 5680
},
{
"epoch": 0.40328868098376924,
"grad_norm": 0.55078125,
"learning_rate": 0.0001,
"loss": 1.4932,
"step": 5690
},
{
"epoch": 0.4039974484371678,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.5066,
"step": 5700
},
{
"epoch": 0.4047062158905663,
"grad_norm": 0.5390625,
"learning_rate": 0.0001,
"loss": 1.498,
"step": 5710
},
{
"epoch": 0.40541498334396486,
"grad_norm": 0.482421875,
"learning_rate": 0.0001,
"loss": 1.5048,
"step": 5720
},
{
"epoch": 0.4061237507973634,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4863,
"step": 5730
},
{
"epoch": 0.40683251825076194,
"grad_norm": 0.66796875,
"learning_rate": 0.0001,
"loss": 1.4899,
"step": 5740
},
{
"epoch": 0.4075412857041605,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.4792,
"step": 5750
},
{
"epoch": 0.408250053157559,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4915,
"step": 5760
},
{
"epoch": 0.40895882061095756,
"grad_norm": 0.515625,
"learning_rate": 0.0001,
"loss": 1.4923,
"step": 5770
},
{
"epoch": 0.4096675880643561,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.4757,
"step": 5780
},
{
"epoch": 0.41037635551775464,
"grad_norm": 0.484375,
"learning_rate": 0.0001,
"loss": 1.4972,
"step": 5790
},
{
"epoch": 0.4110851229711532,
"grad_norm": 0.5625,
"learning_rate": 0.0001,
"loss": 1.4846,
"step": 5800
},
{
"epoch": 0.4117938904245517,
"grad_norm": 0.56640625,
"learning_rate": 0.0001,
"loss": 1.4888,
"step": 5810
},
{
"epoch": 0.41250265787795026,
"grad_norm": 0.45703125,
"learning_rate": 0.0001,
"loss": 1.4996,
"step": 5820
},
{
"epoch": 0.4132114253313488,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.4927,
"step": 5830
},
{
"epoch": 0.41392019278474734,
"grad_norm": 0.4765625,
"learning_rate": 0.0001,
"loss": 1.4861,
"step": 5840
},
{
"epoch": 0.4146289602381459,
"grad_norm": 0.53125,
"learning_rate": 0.0001,
"loss": 1.4917,
"step": 5850
},
{
"epoch": 0.4153377276915444,
"grad_norm": 0.4921875,
"learning_rate": 0.0001,
"loss": 1.5034,
"step": 5860
},
{
"epoch": 0.41604649514494296,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.5052,
"step": 5870
},
{
"epoch": 0.4167552625983415,
"grad_norm": 0.447265625,
"learning_rate": 0.0001,
"loss": 1.5002,
"step": 5880
},
{
"epoch": 0.41746403005174004,
"grad_norm": 0.47265625,
"learning_rate": 0.0001,
"loss": 1.5054,
"step": 5890
},
{
"epoch": 0.4181727975051386,
"grad_norm": 0.5546875,
"learning_rate": 0.0001,
"loss": 1.498,
"step": 5900
},
{
"epoch": 0.4188815649585371,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.4894,
"step": 5910
},
{
"epoch": 0.41959033241193566,
"grad_norm": 0.59765625,
"learning_rate": 0.0001,
"loss": 1.5121,
"step": 5920
},
{
"epoch": 0.4202990998653342,
"grad_norm": 0.48828125,
"learning_rate": 0.0001,
"loss": 1.4975,
"step": 5930
},
{
"epoch": 0.42100786731873274,
"grad_norm": 0.6484375,
"learning_rate": 0.0001,
"loss": 1.4825,
"step": 5940
},
{
"epoch": 0.4217166347721313,
"grad_norm": 0.609375,
"learning_rate": 0.0001,
"loss": 1.4985,
"step": 5950
},
{
"epoch": 0.4224254022255298,
"grad_norm": 0.5078125,
"learning_rate": 0.0001,
"loss": 1.4922,
"step": 5960
},
{
"epoch": 0.42313416967892836,
"grad_norm": 0.50390625,
"learning_rate": 0.0001,
"loss": 1.4964,
"step": 5970
},
{
"epoch": 0.4238429371323269,
"grad_norm": 0.53515625,
"learning_rate": 0.0001,
"loss": 1.4982,
"step": 5980
},
{
"epoch": 0.42455170458572544,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4778,
"step": 5990
},
{
"epoch": 0.425260472039124,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.4909,
"step": 6000
},
{
"epoch": 0.425260472039124,
"eval_accuracy": 0.6661219664101509,
"eval_loss": 1.4022555351257324,
"eval_runtime": 7.2496,
"eval_samples_per_second": 47.865,
"eval_steps_per_second": 0.414,
"step": 6000
},
{
"epoch": 0.4259692394925225,
"grad_norm": 0.48046875,
"learning_rate": 0.0001,
"loss": 1.5043,
"step": 6010
},
{
"epoch": 0.42667800694592106,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.507,
"step": 6020
},
{
"epoch": 0.4273867743993196,
"grad_norm": 0.4453125,
"learning_rate": 0.0001,
"loss": 1.4907,
"step": 6030
},
{
"epoch": 0.42809554185271814,
"grad_norm": 0.5,
"learning_rate": 0.0001,
"loss": 1.491,
"step": 6040
},
{
"epoch": 0.4288043093061167,
"grad_norm": 0.5859375,
"learning_rate": 0.0001,
"loss": 1.5023,
"step": 6050
},
{
"epoch": 0.4295130767595152,
"grad_norm": 0.486328125,
"learning_rate": 9.997960964140947e-05,
"loss": 1.4967,
"step": 6060
},
{
"epoch": 0.43022184421291376,
"grad_norm": 0.4921875,
"learning_rate": 9.991845519630678e-05,
"loss": 1.4897,
"step": 6070
},
{
"epoch": 0.4309306116663123,
"grad_norm": 0.5703125,
"learning_rate": 9.981658654313457e-05,
"loss": 1.4917,
"step": 6080
},
{
"epoch": 0.43163937911971084,
"grad_norm": 0.5,
"learning_rate": 9.967408676742751e-05,
"loss": 1.5015,
"step": 6090
},
{
"epoch": 0.4323481465731094,
"grad_norm": 0.486328125,
"learning_rate": 9.949107209404665e-05,
"loss": 1.4798,
"step": 6100
},
{
"epoch": 0.4330569140265079,
"grad_norm": 0.4453125,
"learning_rate": 9.926769179238466e-05,
"loss": 1.4874,
"step": 6110
},
{
"epoch": 0.43376568147990646,
"grad_norm": 0.62109375,
"learning_rate": 9.900412805461967e-05,
"loss": 1.4951,
"step": 6120
},
{
"epoch": 0.434474448933305,
"grad_norm": 0.47265625,
"learning_rate": 9.870059584711668e-05,
"loss": 1.4917,
"step": 6130
},
{
"epoch": 0.43518321638670354,
"grad_norm": 0.51953125,
"learning_rate": 9.835734273509786e-05,
"loss": 1.5102,
"step": 6140
},
{
"epoch": 0.4358919838401021,
"grad_norm": 0.482421875,
"learning_rate": 9.797464868072488e-05,
"loss": 1.485,
"step": 6150
},
{
"epoch": 0.4366007512935006,
"grad_norm": 0.5234375,
"learning_rate": 9.755282581475769e-05,
"loss": 1.4967,
"step": 6160
},
{
"epoch": 0.43730951874689916,
"grad_norm": 0.4609375,
"learning_rate": 9.709221818197624e-05,
"loss": 1.4827,
"step": 6170
},
{
"epoch": 0.4380182862002977,
"grad_norm": 0.56640625,
"learning_rate": 9.659320146057262e-05,
"loss": 1.4919,
"step": 6180
},
{
"epoch": 0.43872705365369624,
"grad_norm": 0.4921875,
"learning_rate": 9.60561826557425e-05,
"loss": 1.5128,
"step": 6190
},
{
"epoch": 0.4394358211070948,
"grad_norm": 0.44921875,
"learning_rate": 9.548159976772592e-05,
"loss": 1.496,
"step": 6200
},
{
"epoch": 0.4401445885604933,
"grad_norm": 0.51953125,
"learning_rate": 9.486992143456792e-05,
"loss": 1.4835,
"step": 6210
},
{
"epoch": 0.44085335601389186,
"grad_norm": 0.45703125,
"learning_rate": 9.422164654989072e-05,
"loss": 1.4907,
"step": 6220
},
{
"epoch": 0.4415621234672904,
"grad_norm": 0.5703125,
"learning_rate": 9.353730385598887e-05,
"loss": 1.4982,
"step": 6230
},
{
"epoch": 0.44227089092068894,
"grad_norm": 0.447265625,
"learning_rate": 9.281745151257946e-05,
"loss": 1.493,
"step": 6240
},
{
"epoch": 0.4429796583740875,
"grad_norm": 0.48046875,
"learning_rate": 9.206267664155907e-05,
"loss": 1.4849,
"step": 6250
},
{
"epoch": 0.443688425827486,
"grad_norm": 0.484375,
"learning_rate": 9.12735948481387e-05,
"loss": 1.495,
"step": 6260
},
{
"epoch": 0.44439719328088456,
"grad_norm": 0.47265625,
"learning_rate": 9.045084971874738e-05,
"loss": 1.4908,
"step": 6270
},
{
"epoch": 0.4451059607342831,
"grad_norm": 0.462890625,
"learning_rate": 8.959511229611376e-05,
"loss": 1.4864,
"step": 6280
},
{
"epoch": 0.44581472818768164,
"grad_norm": 0.48046875,
"learning_rate": 8.870708053195413e-05,
"loss": 1.4839,
"step": 6290
},
{
"epoch": 0.4465234956410802,
"grad_norm": 0.4453125,
"learning_rate": 8.778747871771292e-05,
"loss": 1.4995,
"step": 6300
},
{
"epoch": 0.4472322630944787,
"grad_norm": 0.45703125,
"learning_rate": 8.683705689382024e-05,
"loss": 1.4856,
"step": 6310
},
{
"epoch": 0.44794103054787726,
"grad_norm": 0.51953125,
"learning_rate": 8.585659023794818e-05,
"loss": 1.4933,
"step": 6320
},
{
"epoch": 0.4486497980012758,
"grad_norm": 0.490234375,
"learning_rate": 8.484687843276469e-05,
"loss": 1.4798,
"step": 6330
},
{
"epoch": 0.44935856545467434,
"grad_norm": 0.439453125,
"learning_rate": 8.380874501370097e-05,
"loss": 1.4972,
"step": 6340
},
{
"epoch": 0.4500673329080729,
"grad_norm": 0.45703125,
"learning_rate": 8.274303669726426e-05,
"loss": 1.482,
"step": 6350
},
{
"epoch": 0.4507761003614714,
"grad_norm": 0.44140625,
"learning_rate": 8.165062269044353e-05,
"loss": 1.4946,
"step": 6360
},
{
"epoch": 0.45148486781486996,
"grad_norm": 0.443359375,
"learning_rate": 8.053239398177191e-05,
"loss": 1.487,
"step": 6370
},
{
"epoch": 0.4521936352682685,
"grad_norm": 0.51953125,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5043,
"step": 6380
},
{
"epoch": 0.45290240272166704,
"grad_norm": 0.4921875,
"learning_rate": 7.822216094333847e-05,
"loss": 1.4912,
"step": 6390
},
{
"epoch": 0.4536111701750656,
"grad_norm": 0.455078125,
"learning_rate": 7.703204087277988e-05,
"loss": 1.4895,
"step": 6400
},
{
"epoch": 0.4543199376284641,
"grad_norm": 0.470703125,
"learning_rate": 7.58198730819481e-05,
"loss": 1.5003,
"step": 6410
},
{
"epoch": 0.45502870508186266,
"grad_norm": 0.5078125,
"learning_rate": 7.45866462322802e-05,
"loss": 1.4872,
"step": 6420
},
{
"epoch": 0.4557374725352612,
"grad_norm": 0.435546875,
"learning_rate": 7.333336616128369e-05,
"loss": 1.4993,
"step": 6430
},
{
"epoch": 0.45644623998865974,
"grad_norm": 0.396484375,
"learning_rate": 7.206105506216106e-05,
"loss": 1.5037,
"step": 6440
},
{
"epoch": 0.4571550074420583,
"grad_norm": 0.43359375,
"learning_rate": 7.077075065009433e-05,
"loss": 1.5001,
"step": 6450
},
{
"epoch": 0.4578637748954568,
"grad_norm": 0.478515625,
"learning_rate": 6.946350531586959e-05,
"loss": 1.5007,
"step": 6460
},
{
"epoch": 0.45857254234885536,
"grad_norm": 0.40625,
"learning_rate": 6.814038526753205e-05,
"loss": 1.4763,
"step": 6470
},
{
"epoch": 0.4592813098022539,
"grad_norm": 0.4453125,
"learning_rate": 6.680246966077151e-05,
"loss": 1.4987,
"step": 6480
},
{
"epoch": 0.45999007725565244,
"grad_norm": 0.3984375,
"learning_rate": 6.545084971874738e-05,
"loss": 1.4925,
"step": 6490
},
{
"epoch": 0.460698844709051,
"grad_norm": 0.40234375,
"learning_rate": 6.408662784207149e-05,
"loss": 1.4923,
"step": 6500
},
{
"epoch": 0.460698844709051,
"eval_accuracy": 0.666518879775364,
"eval_loss": 1.39996337890625,
"eval_runtime": 7.2215,
"eval_samples_per_second": 48.051,
"eval_steps_per_second": 0.415,
"step": 6500
},
{
"epoch": 0.4614076121624495,
"grad_norm": 0.42578125,
"learning_rate": 6.271091670967436e-05,
"loss": 1.4809,
"step": 6510
},
{
"epoch": 0.46211637961584806,
"grad_norm": 0.41015625,
"learning_rate": 6.132483837128823e-05,
"loss": 1.4809,
"step": 6520
},
{
"epoch": 0.4628251470692466,
"grad_norm": 0.41796875,
"learning_rate": 5.992952333228728e-05,
"loss": 1.5065,
"step": 6530
},
{
"epoch": 0.46353391452264514,
"grad_norm": 0.453125,
"learning_rate": 5.85261096316312e-05,
"loss": 1.4931,
"step": 6540
},
{
"epoch": 0.4642426819760437,
"grad_norm": 0.431640625,
"learning_rate": 5.7115741913664264e-05,
"loss": 1.4766,
"step": 6550
},
{
"epoch": 0.4649514494294422,
"grad_norm": 0.412109375,
"learning_rate": 5.569957049452703e-05,
"loss": 1.49,
"step": 6560
},
{
"epoch": 0.46566021688284076,
"grad_norm": 0.423828125,
"learning_rate": 5.427875042394199e-05,
"loss": 1.4955,
"step": 6570
},
{
"epoch": 0.4663689843362393,
"grad_norm": 0.4375,
"learning_rate": 5.2854440543138406e-05,
"loss": 1.4972,
"step": 6580
},
{
"epoch": 0.46707775178963784,
"grad_norm": 0.408203125,
"learning_rate": 5.142780253968481e-05,
"loss": 1.4879,
"step": 6590
},
{
"epoch": 0.4677865192430364,
"grad_norm": 0.380859375,
"learning_rate": 5e-05,
"loss": 1.493,
"step": 6600
},
{
"epoch": 0.4684952866964349,
"grad_norm": 0.3671875,
"learning_rate": 4.85721974603152e-05,
"loss": 1.4995,
"step": 6610
},
{
"epoch": 0.46920405414983346,
"grad_norm": 0.408203125,
"learning_rate": 4.71455594568616e-05,
"loss": 1.4771,
"step": 6620
},
{
"epoch": 0.469912821603232,
"grad_norm": 0.419921875,
"learning_rate": 4.5721249576058027e-05,
"loss": 1.4815,
"step": 6630
},
{
"epoch": 0.47062158905663054,
"grad_norm": 0.396484375,
"learning_rate": 4.4300429505472976e-05,
"loss": 1.4733,
"step": 6640
},
{
"epoch": 0.4713303565100291,
"grad_norm": 0.365234375,
"learning_rate": 4.288425808633575e-05,
"loss": 1.4976,
"step": 6650
},
{
"epoch": 0.4720391239634276,
"grad_norm": 0.35546875,
"learning_rate": 4.147389036836881e-05,
"loss": 1.4884,
"step": 6660
},
{
"epoch": 0.47274789141682616,
"grad_norm": 0.375,
"learning_rate": 4.007047666771274e-05,
"loss": 1.4969,
"step": 6670
},
{
"epoch": 0.4734566588702247,
"grad_norm": 0.375,
"learning_rate": 3.8675161628711776e-05,
"loss": 1.5137,
"step": 6680
},
{
"epoch": 0.47416542632362324,
"grad_norm": 0.365234375,
"learning_rate": 3.728908329032567e-05,
"loss": 1.4889,
"step": 6690
},
{
"epoch": 0.4748741937770218,
"grad_norm": 0.408203125,
"learning_rate": 3.591337215792852e-05,
"loss": 1.4875,
"step": 6700
},
{
"epoch": 0.4755829612304203,
"grad_norm": 0.349609375,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.5153,
"step": 6710
},
{
"epoch": 0.47629172868381886,
"grad_norm": 0.353515625,
"learning_rate": 3.3197530339228487e-05,
"loss": 1.4904,
"step": 6720
},
{
"epoch": 0.4770004961372174,
"grad_norm": 0.349609375,
"learning_rate": 3.1859614732467954e-05,
"loss": 1.4851,
"step": 6730
},
{
"epoch": 0.47770926359061594,
"grad_norm": 0.34765625,
"learning_rate": 3.053649468413043e-05,
"loss": 1.4979,
"step": 6740
},
{
"epoch": 0.4784180310440145,
"grad_norm": 0.33203125,
"learning_rate": 2.9229249349905684e-05,
"loss": 1.492,
"step": 6750
},
{
"epoch": 0.479126798497413,
"grad_norm": 0.345703125,
"learning_rate": 2.7938944937838923e-05,
"loss": 1.4843,
"step": 6760
},
{
"epoch": 0.47983556595081156,
"grad_norm": 0.34765625,
"learning_rate": 2.6666633838716314e-05,
"loss": 1.4781,
"step": 6770
},
{
"epoch": 0.4805443334042101,
"grad_norm": 0.349609375,
"learning_rate": 2.5413353767719805e-05,
"loss": 1.4949,
"step": 6780
},
{
"epoch": 0.48125310085760864,
"grad_norm": 0.35546875,
"learning_rate": 2.418012691805191e-05,
"loss": 1.4961,
"step": 6790
},
{
"epoch": 0.4819618683110072,
"grad_norm": 0.38671875,
"learning_rate": 2.296795912722014e-05,
"loss": 1.4942,
"step": 6800
},
{
"epoch": 0.4826706357644057,
"grad_norm": 0.333984375,
"learning_rate": 2.1777839056661554e-05,
"loss": 1.49,
"step": 6810
},
{
"epoch": 0.48337940321780426,
"grad_norm": 0.361328125,
"learning_rate": 2.061073738537635e-05,
"loss": 1.5085,
"step": 6820
},
{
"epoch": 0.4840881706712028,
"grad_norm": 0.34765625,
"learning_rate": 1.946760601822809e-05,
"loss": 1.4926,
"step": 6830
},
{
"epoch": 0.48479693812460134,
"grad_norm": 0.3203125,
"learning_rate": 1.8349377309556486e-05,
"loss": 1.4815,
"step": 6840
},
{
"epoch": 0.4855057055779999,
"grad_norm": 0.328125,
"learning_rate": 1.725696330273575e-05,
"loss": 1.5001,
"step": 6850
},
{
"epoch": 0.4862144730313984,
"grad_norm": 0.3359375,
"learning_rate": 1.619125498629904e-05,
"loss": 1.481,
"step": 6860
},
{
"epoch": 0.48692324048479696,
"grad_norm": 0.353515625,
"learning_rate": 1.5153121567235335e-05,
"loss": 1.4781,
"step": 6870
},
{
"epoch": 0.4876320079381955,
"grad_norm": 0.31640625,
"learning_rate": 1.414340976205183e-05,
"loss": 1.4906,
"step": 6880
},
{
"epoch": 0.48834077539159404,
"grad_norm": 0.328125,
"learning_rate": 1.3162943106179749e-05,
"loss": 1.5031,
"step": 6890
},
{
"epoch": 0.4890495428449926,
"grad_norm": 0.328125,
"learning_rate": 1.2212521282287092e-05,
"loss": 1.4878,
"step": 6900
},
{
"epoch": 0.4897583102983911,
"grad_norm": 0.322265625,
"learning_rate": 1.1292919468045877e-05,
"loss": 1.4929,
"step": 6910
},
{
"epoch": 0.49046707775178966,
"grad_norm": 0.33984375,
"learning_rate": 1.0404887703886251e-05,
"loss": 1.4889,
"step": 6920
},
{
"epoch": 0.4911758452051882,
"grad_norm": 0.359375,
"learning_rate": 9.549150281252633e-06,
"loss": 1.4783,
"step": 6930
},
{
"epoch": 0.49188461265858674,
"grad_norm": 0.322265625,
"learning_rate": 8.7264051518613e-06,
"loss": 1.4928,
"step": 6940
},
{
"epoch": 0.4925933801119853,
"grad_norm": 0.3359375,
"learning_rate": 7.937323358440935e-06,
"loss": 1.489,
"step": 6950
},
{
"epoch": 0.4933021475653838,
"grad_norm": 0.328125,
"learning_rate": 7.182548487420554e-06,
"loss": 1.4899,
"step": 6960
},
{
"epoch": 0.49401091501878236,
"grad_norm": 0.33984375,
"learning_rate": 6.462696144011149e-06,
"loss": 1.4859,
"step": 6970
},
{
"epoch": 0.4947196824721809,
"grad_norm": 0.34375,
"learning_rate": 5.778353450109286e-06,
"loss": 1.4944,
"step": 6980
},
{
"epoch": 0.49542844992557944,
"grad_norm": 0.33203125,
"learning_rate": 5.13007856543209e-06,
"loss": 1.481,
"step": 6990
},
{
"epoch": 0.496137217378978,
"grad_norm": 0.333984375,
"learning_rate": 4.5184002322740785e-06,
"loss": 1.4965,
"step": 7000
},
{
"epoch": 0.496137217378978,
"eval_accuracy": 0.6668890507507222,
"eval_loss": 1.397897720336914,
"eval_runtime": 7.477,
"eval_samples_per_second": 46.409,
"eval_steps_per_second": 0.401,
"step": 7000
},
{
"epoch": 0.4968459848323765,
"grad_norm": 0.373046875,
"learning_rate": 3.9438173442575e-06,
"loss": 1.492,
"step": 7010
},
{
"epoch": 0.49755475228577506,
"grad_norm": 0.310546875,
"learning_rate": 3.406798539427386e-06,
"loss": 1.4714,
"step": 7020
},
{
"epoch": 0.4982635197391736,
"grad_norm": 0.31640625,
"learning_rate": 2.9077818180237693e-06,
"loss": 1.4839,
"step": 7030
},
{
"epoch": 0.49897228719257214,
"grad_norm": 0.33203125,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.4976,
"step": 7040
},
{
"epoch": 0.4996810546459707,
"grad_norm": 0.328125,
"learning_rate": 2.0253513192751373e-06,
"loss": 1.4857,
"step": 7050
},
{
"epoch": 0.50003543837267,
"step": 7055,
"total_flos": 1.568873006179696e+20,
"train_loss": 1.4944760516549913,
"train_runtime": 7219.1988,
"train_samples_per_second": 125.076,
"train_steps_per_second": 0.977
}
],
"logging_steps": 10,
"max_steps": 7055,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.568873006179696e+20,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}