roberta-base-latin-cased2 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.922198429693076,
"global_step": 100000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"learning_rate": 4.975e-05,
"loss": 7.9042,
"step": 500
},
{
"epoch": 0.09,
"learning_rate": 4.9500000000000004e-05,
"loss": 7.2216,
"step": 1000
},
{
"epoch": 0.13,
"learning_rate": 4.9250000000000004e-05,
"loss": 6.9603,
"step": 1500
},
{
"epoch": 0.18,
"learning_rate": 4.9e-05,
"loss": 6.6905,
"step": 2000
},
{
"epoch": 0.22,
"learning_rate": 4.875e-05,
"loss": 6.3394,
"step": 2500
},
{
"epoch": 0.27,
"learning_rate": 4.85e-05,
"loss": 6.0743,
"step": 3000
},
{
"epoch": 0.31,
"learning_rate": 4.825e-05,
"loss": 5.9176,
"step": 3500
},
{
"epoch": 0.36,
"learning_rate": 4.8e-05,
"loss": 5.803,
"step": 4000
},
{
"epoch": 0.4,
"learning_rate": 4.775e-05,
"loss": 5.6989,
"step": 4500
},
{
"epoch": 0.45,
"learning_rate": 4.75e-05,
"loss": 5.6264,
"step": 5000
},
{
"epoch": 0.49,
"learning_rate": 4.7249999999999997e-05,
"loss": 5.5409,
"step": 5500
},
{
"epoch": 0.54,
"learning_rate": 4.7e-05,
"loss": 5.4755,
"step": 6000
},
{
"epoch": 0.58,
"learning_rate": 4.6750000000000005e-05,
"loss": 5.412,
"step": 6500
},
{
"epoch": 0.62,
"learning_rate": 4.6500000000000005e-05,
"loss": 5.3351,
"step": 7000
},
{
"epoch": 0.67,
"learning_rate": 4.6250000000000006e-05,
"loss": 5.2848,
"step": 7500
},
{
"epoch": 0.71,
"learning_rate": 4.600000000000001e-05,
"loss": 5.2393,
"step": 8000
},
{
"epoch": 0.76,
"learning_rate": 4.575e-05,
"loss": 5.1773,
"step": 8500
},
{
"epoch": 0.8,
"learning_rate": 4.55e-05,
"loss": 5.1301,
"step": 9000
},
{
"epoch": 0.85,
"learning_rate": 4.525e-05,
"loss": 5.0809,
"step": 9500
},
{
"epoch": 0.89,
"learning_rate": 4.5e-05,
"loss": 5.03,
"step": 10000
},
{
"epoch": 0.94,
"learning_rate": 4.4750000000000004e-05,
"loss": 4.9868,
"step": 10500
},
{
"epoch": 0.98,
"learning_rate": 4.4500000000000004e-05,
"loss": 4.9555,
"step": 11000
},
{
"epoch": 1.03,
"learning_rate": 4.4250000000000005e-05,
"loss": 4.9101,
"step": 11500
},
{
"epoch": 1.07,
"learning_rate": 4.4000000000000006e-05,
"loss": 4.8653,
"step": 12000
},
{
"epoch": 1.12,
"learning_rate": 4.375e-05,
"loss": 4.8241,
"step": 12500
},
{
"epoch": 1.16,
"learning_rate": 4.35e-05,
"loss": 4.7926,
"step": 13000
},
{
"epoch": 1.2,
"learning_rate": 4.325e-05,
"loss": 4.7647,
"step": 13500
},
{
"epoch": 1.25,
"learning_rate": 4.3e-05,
"loss": 4.7273,
"step": 14000
},
{
"epoch": 1.29,
"learning_rate": 4.275e-05,
"loss": 4.6983,
"step": 14500
},
{
"epoch": 1.34,
"learning_rate": 4.25e-05,
"loss": 4.6703,
"step": 15000
},
{
"epoch": 1.38,
"learning_rate": 4.2250000000000004e-05,
"loss": 4.6422,
"step": 15500
},
{
"epoch": 1.43,
"learning_rate": 4.2e-05,
"loss": 4.6041,
"step": 16000
},
{
"epoch": 1.47,
"learning_rate": 4.175e-05,
"loss": 4.5803,
"step": 16500
},
{
"epoch": 1.52,
"learning_rate": 4.15e-05,
"loss": 4.5505,
"step": 17000
},
{
"epoch": 1.56,
"learning_rate": 4.125e-05,
"loss": 4.5344,
"step": 17500
},
{
"epoch": 1.61,
"learning_rate": 4.1e-05,
"loss": 4.4981,
"step": 18000
},
{
"epoch": 1.65,
"learning_rate": 4.075e-05,
"loss": 4.4813,
"step": 18500
},
{
"epoch": 1.7,
"learning_rate": 4.05e-05,
"loss": 4.4484,
"step": 19000
},
{
"epoch": 1.74,
"learning_rate": 4.025e-05,
"loss": 4.4253,
"step": 19500
},
{
"epoch": 1.78,
"learning_rate": 4e-05,
"loss": 4.4023,
"step": 20000
},
{
"epoch": 1.83,
"learning_rate": 3.9750000000000004e-05,
"loss": 4.3891,
"step": 20500
},
{
"epoch": 1.87,
"learning_rate": 3.9500000000000005e-05,
"loss": 4.365,
"step": 21000
},
{
"epoch": 1.92,
"learning_rate": 3.9250000000000005e-05,
"loss": 4.3448,
"step": 21500
},
{
"epoch": 1.96,
"learning_rate": 3.9000000000000006e-05,
"loss": 4.3206,
"step": 22000
},
{
"epoch": 2.01,
"learning_rate": 3.875e-05,
"loss": 4.2914,
"step": 22500
},
{
"epoch": 2.05,
"learning_rate": 3.85e-05,
"loss": 4.2772,
"step": 23000
},
{
"epoch": 2.1,
"learning_rate": 3.825e-05,
"loss": 4.2573,
"step": 23500
},
{
"epoch": 2.14,
"learning_rate": 3.8e-05,
"loss": 4.2464,
"step": 24000
},
{
"epoch": 2.19,
"learning_rate": 3.775e-05,
"loss": 4.2318,
"step": 24500
},
{
"epoch": 2.23,
"learning_rate": 3.7500000000000003e-05,
"loss": 4.2182,
"step": 25000
},
{
"epoch": 2.28,
"learning_rate": 3.7250000000000004e-05,
"loss": 4.2008,
"step": 25500
},
{
"epoch": 2.32,
"learning_rate": 3.7e-05,
"loss": 4.1774,
"step": 26000
},
{
"epoch": 2.36,
"learning_rate": 3.675e-05,
"loss": 4.165,
"step": 26500
},
{
"epoch": 2.41,
"learning_rate": 3.65e-05,
"loss": 4.149,
"step": 27000
},
{
"epoch": 2.45,
"learning_rate": 3.625e-05,
"loss": 4.1298,
"step": 27500
},
{
"epoch": 2.5,
"learning_rate": 3.6e-05,
"loss": 4.1297,
"step": 28000
},
{
"epoch": 2.54,
"learning_rate": 3.575e-05,
"loss": 4.1118,
"step": 28500
},
{
"epoch": 2.59,
"learning_rate": 3.55e-05,
"loss": 4.1006,
"step": 29000
},
{
"epoch": 2.63,
"learning_rate": 3.525e-05,
"loss": 4.0858,
"step": 29500
},
{
"epoch": 2.68,
"learning_rate": 3.5e-05,
"loss": 4.079,
"step": 30000
},
{
"epoch": 2.72,
"learning_rate": 3.475e-05,
"loss": 4.0601,
"step": 30500
},
{
"epoch": 2.77,
"learning_rate": 3.45e-05,
"loss": 4.0448,
"step": 31000
},
{
"epoch": 2.81,
"learning_rate": 3.4250000000000006e-05,
"loss": 4.0449,
"step": 31500
},
{
"epoch": 2.86,
"learning_rate": 3.4000000000000007e-05,
"loss": 4.0226,
"step": 32000
},
{
"epoch": 2.9,
"learning_rate": 3.375000000000001e-05,
"loss": 4.0213,
"step": 32500
},
{
"epoch": 2.94,
"learning_rate": 3.35e-05,
"loss": 4.0116,
"step": 33000
},
{
"epoch": 2.99,
"learning_rate": 3.325e-05,
"loss": 4.0,
"step": 33500
},
{
"epoch": 3.03,
"learning_rate": 3.3e-05,
"loss": 3.9855,
"step": 34000
},
{
"epoch": 3.08,
"learning_rate": 3.275e-05,
"loss": 3.9693,
"step": 34500
},
{
"epoch": 3.12,
"learning_rate": 3.2500000000000004e-05,
"loss": 3.9603,
"step": 35000
},
{
"epoch": 3.17,
"learning_rate": 3.2250000000000005e-05,
"loss": 3.9484,
"step": 35500
},
{
"epoch": 3.21,
"learning_rate": 3.2000000000000005e-05,
"loss": 3.9402,
"step": 36000
},
{
"epoch": 3.26,
"learning_rate": 3.175e-05,
"loss": 3.9335,
"step": 36500
},
{
"epoch": 3.3,
"learning_rate": 3.15e-05,
"loss": 3.9221,
"step": 37000
},
{
"epoch": 3.35,
"learning_rate": 3.125e-05,
"loss": 3.9167,
"step": 37500
},
{
"epoch": 3.39,
"learning_rate": 3.1e-05,
"loss": 3.9162,
"step": 38000
},
{
"epoch": 3.44,
"learning_rate": 3.075e-05,
"loss": 3.9,
"step": 38500
},
{
"epoch": 3.48,
"learning_rate": 3.05e-05,
"loss": 3.8932,
"step": 39000
},
{
"epoch": 3.52,
"learning_rate": 3.025e-05,
"loss": 3.8861,
"step": 39500
},
{
"epoch": 3.57,
"learning_rate": 3e-05,
"loss": 3.8776,
"step": 40000
},
{
"epoch": 3.61,
"learning_rate": 2.975e-05,
"loss": 3.8587,
"step": 40500
},
{
"epoch": 3.66,
"learning_rate": 2.95e-05,
"loss": 3.8668,
"step": 41000
},
{
"epoch": 3.7,
"learning_rate": 2.925e-05,
"loss": 3.8491,
"step": 41500
},
{
"epoch": 3.75,
"learning_rate": 2.9e-05,
"loss": 3.8415,
"step": 42000
},
{
"epoch": 3.79,
"learning_rate": 2.8749999999999997e-05,
"loss": 3.8413,
"step": 42500
},
{
"epoch": 3.84,
"learning_rate": 2.8499999999999998e-05,
"loss": 3.8399,
"step": 43000
},
{
"epoch": 3.88,
"learning_rate": 2.825e-05,
"loss": 3.8294,
"step": 43500
},
{
"epoch": 3.93,
"learning_rate": 2.8000000000000003e-05,
"loss": 3.8218,
"step": 44000
},
{
"epoch": 3.97,
"learning_rate": 2.7750000000000004e-05,
"loss": 3.8192,
"step": 44500
},
{
"epoch": 4.01,
"learning_rate": 2.7500000000000004e-05,
"loss": 3.8031,
"step": 45000
},
{
"epoch": 4.06,
"learning_rate": 2.725e-05,
"loss": 3.8007,
"step": 45500
},
{
"epoch": 4.1,
"learning_rate": 2.7000000000000002e-05,
"loss": 3.7875,
"step": 46000
},
{
"epoch": 4.15,
"learning_rate": 2.6750000000000003e-05,
"loss": 3.7852,
"step": 46500
},
{
"epoch": 4.19,
"learning_rate": 2.6500000000000004e-05,
"loss": 3.775,
"step": 47000
},
{
"epoch": 4.24,
"learning_rate": 2.625e-05,
"loss": 3.7623,
"step": 47500
},
{
"epoch": 4.28,
"learning_rate": 2.6000000000000002e-05,
"loss": 3.7625,
"step": 48000
},
{
"epoch": 4.33,
"learning_rate": 2.5750000000000002e-05,
"loss": 3.7596,
"step": 48500
},
{
"epoch": 4.37,
"learning_rate": 2.5500000000000003e-05,
"loss": 3.754,
"step": 49000
},
{
"epoch": 4.42,
"learning_rate": 2.525e-05,
"loss": 3.7532,
"step": 49500
},
{
"epoch": 4.46,
"learning_rate": 2.5e-05,
"loss": 3.7562,
"step": 50000
},
{
"epoch": 4.51,
"learning_rate": 2.4750000000000002e-05,
"loss": 3.7376,
"step": 50500
},
{
"epoch": 4.55,
"learning_rate": 2.45e-05,
"loss": 3.73,
"step": 51000
},
{
"epoch": 4.59,
"learning_rate": 2.425e-05,
"loss": 3.7403,
"step": 51500
},
{
"epoch": 4.64,
"learning_rate": 2.4e-05,
"loss": 3.7225,
"step": 52000
},
{
"epoch": 4.68,
"learning_rate": 2.375e-05,
"loss": 3.7258,
"step": 52500
},
{
"epoch": 4.73,
"learning_rate": 2.35e-05,
"loss": 3.7193,
"step": 53000
},
{
"epoch": 4.77,
"learning_rate": 2.3250000000000003e-05,
"loss": 3.7119,
"step": 53500
},
{
"epoch": 4.82,
"learning_rate": 2.3000000000000003e-05,
"loss": 3.7166,
"step": 54000
},
{
"epoch": 4.86,
"learning_rate": 2.275e-05,
"loss": 3.7069,
"step": 54500
},
{
"epoch": 4.91,
"learning_rate": 2.25e-05,
"loss": 3.6986,
"step": 55000
},
{
"epoch": 4.95,
"learning_rate": 2.2250000000000002e-05,
"loss": 3.7003,
"step": 55500
},
{
"epoch": 5.0,
"learning_rate": 2.2000000000000003e-05,
"loss": 3.6822,
"step": 56000
},
{
"epoch": 5.04,
"learning_rate": 2.175e-05,
"loss": 3.688,
"step": 56500
},
{
"epoch": 5.09,
"learning_rate": 2.15e-05,
"loss": 3.6611,
"step": 57000
},
{
"epoch": 5.13,
"learning_rate": 2.125e-05,
"loss": 3.6661,
"step": 57500
},
{
"epoch": 5.17,
"learning_rate": 2.1e-05,
"loss": 3.6626,
"step": 58000
},
{
"epoch": 5.22,
"learning_rate": 2.075e-05,
"loss": 3.6588,
"step": 58500
},
{
"epoch": 5.26,
"learning_rate": 2.05e-05,
"loss": 3.6508,
"step": 59000
},
{
"epoch": 5.31,
"learning_rate": 2.025e-05,
"loss": 3.6492,
"step": 59500
},
{
"epoch": 5.35,
"learning_rate": 2e-05,
"loss": 3.6398,
"step": 60000
},
{
"epoch": 5.4,
"learning_rate": 1.9750000000000002e-05,
"loss": 3.6405,
"step": 60500
},
{
"epoch": 5.44,
"learning_rate": 1.9500000000000003e-05,
"loss": 3.6377,
"step": 61000
},
{
"epoch": 5.49,
"learning_rate": 1.925e-05,
"loss": 3.6326,
"step": 61500
},
{
"epoch": 5.53,
"learning_rate": 1.9e-05,
"loss": 3.6305,
"step": 62000
},
{
"epoch": 5.58,
"learning_rate": 1.8750000000000002e-05,
"loss": 3.6291,
"step": 62500
},
{
"epoch": 5.62,
"learning_rate": 1.85e-05,
"loss": 3.6258,
"step": 63000
},
{
"epoch": 5.67,
"learning_rate": 1.825e-05,
"loss": 3.6178,
"step": 63500
},
{
"epoch": 5.71,
"learning_rate": 1.8e-05,
"loss": 3.6175,
"step": 64000
},
{
"epoch": 5.75,
"learning_rate": 1.775e-05,
"loss": 3.6075,
"step": 64500
},
{
"epoch": 5.8,
"learning_rate": 1.75e-05,
"loss": 3.6102,
"step": 65000
},
{
"epoch": 5.84,
"learning_rate": 1.725e-05,
"loss": 3.6099,
"step": 65500
},
{
"epoch": 5.89,
"learning_rate": 1.7000000000000003e-05,
"loss": 3.5967,
"step": 66000
},
{
"epoch": 5.93,
"learning_rate": 1.675e-05,
"loss": 3.6015,
"step": 66500
},
{
"epoch": 5.98,
"learning_rate": 1.65e-05,
"loss": 3.5961,
"step": 67000
},
{
"epoch": 6.02,
"learning_rate": 1.6250000000000002e-05,
"loss": 3.5936,
"step": 67500
},
{
"epoch": 6.07,
"learning_rate": 1.6000000000000003e-05,
"loss": 3.5843,
"step": 68000
},
{
"epoch": 6.11,
"learning_rate": 1.575e-05,
"loss": 3.5877,
"step": 68500
},
{
"epoch": 6.16,
"learning_rate": 1.55e-05,
"loss": 3.5809,
"step": 69000
},
{
"epoch": 6.2,
"learning_rate": 1.525e-05,
"loss": 3.5796,
"step": 69500
},
{
"epoch": 6.25,
"learning_rate": 1.5e-05,
"loss": 3.5755,
"step": 70000
},
{
"epoch": 6.29,
"learning_rate": 1.475e-05,
"loss": 3.5666,
"step": 70500
},
{
"epoch": 6.33,
"learning_rate": 1.45e-05,
"loss": 3.5714,
"step": 71000
},
{
"epoch": 6.38,
"learning_rate": 1.4249999999999999e-05,
"loss": 3.5577,
"step": 71500
},
{
"epoch": 6.42,
"learning_rate": 1.4000000000000001e-05,
"loss": 3.5643,
"step": 72000
},
{
"epoch": 6.47,
"learning_rate": 1.3750000000000002e-05,
"loss": 3.5609,
"step": 72500
},
{
"epoch": 6.51,
"learning_rate": 1.3500000000000001e-05,
"loss": 3.5585,
"step": 73000
},
{
"epoch": 6.56,
"learning_rate": 1.3250000000000002e-05,
"loss": 3.5542,
"step": 73500
},
{
"epoch": 6.6,
"learning_rate": 1.3000000000000001e-05,
"loss": 3.5564,
"step": 74000
},
{
"epoch": 6.65,
"learning_rate": 1.2750000000000002e-05,
"loss": 3.5528,
"step": 74500
},
{
"epoch": 6.69,
"learning_rate": 1.25e-05,
"loss": 3.5483,
"step": 75000
},
{
"epoch": 6.74,
"learning_rate": 1.225e-05,
"loss": 3.5511,
"step": 75500
},
{
"epoch": 6.78,
"learning_rate": 1.2e-05,
"loss": 3.5467,
"step": 76000
},
{
"epoch": 6.83,
"learning_rate": 1.175e-05,
"loss": 3.5371,
"step": 76500
},
{
"epoch": 6.87,
"learning_rate": 1.1500000000000002e-05,
"loss": 3.5459,
"step": 77000
},
{
"epoch": 6.91,
"learning_rate": 1.125e-05,
"loss": 3.5366,
"step": 77500
},
{
"epoch": 6.96,
"learning_rate": 1.1000000000000001e-05,
"loss": 3.5386,
"step": 78000
},
{
"epoch": 7.0,
"learning_rate": 1.075e-05,
"loss": 3.533,
"step": 78500
},
{
"epoch": 7.05,
"learning_rate": 1.05e-05,
"loss": 3.5223,
"step": 79000
},
{
"epoch": 7.09,
"learning_rate": 1.025e-05,
"loss": 3.5232,
"step": 79500
},
{
"epoch": 7.14,
"learning_rate": 1e-05,
"loss": 3.5195,
"step": 80000
},
{
"epoch": 7.18,
"learning_rate": 9.750000000000002e-06,
"loss": 3.5193,
"step": 80500
},
{
"epoch": 7.23,
"learning_rate": 9.5e-06,
"loss": 3.516,
"step": 81000
},
{
"epoch": 7.27,
"learning_rate": 9.25e-06,
"loss": 3.5066,
"step": 81500
},
{
"epoch": 7.32,
"learning_rate": 9e-06,
"loss": 3.5188,
"step": 82000
},
{
"epoch": 7.36,
"learning_rate": 8.75e-06,
"loss": 3.5074,
"step": 82500
},
{
"epoch": 7.41,
"learning_rate": 8.500000000000002e-06,
"loss": 3.5149,
"step": 83000
},
{
"epoch": 7.45,
"learning_rate": 8.25e-06,
"loss": 3.5063,
"step": 83500
},
{
"epoch": 7.49,
"learning_rate": 8.000000000000001e-06,
"loss": 3.5093,
"step": 84000
},
{
"epoch": 7.54,
"learning_rate": 7.75e-06,
"loss": 3.5091,
"step": 84500
},
{
"epoch": 7.58,
"learning_rate": 7.5e-06,
"loss": 3.4986,
"step": 85000
},
{
"epoch": 7.63,
"learning_rate": 7.25e-06,
"loss": 3.5015,
"step": 85500
},
{
"epoch": 7.67,
"learning_rate": 7.000000000000001e-06,
"loss": 3.5016,
"step": 86000
},
{
"epoch": 7.72,
"learning_rate": 6.750000000000001e-06,
"loss": 3.4985,
"step": 86500
},
{
"epoch": 7.76,
"learning_rate": 6.5000000000000004e-06,
"loss": 3.4903,
"step": 87000
},
{
"epoch": 7.81,
"learning_rate": 6.25e-06,
"loss": 3.4943,
"step": 87500
},
{
"epoch": 7.85,
"learning_rate": 6e-06,
"loss": 3.4926,
"step": 88000
},
{
"epoch": 7.9,
"learning_rate": 5.750000000000001e-06,
"loss": 3.4983,
"step": 88500
},
{
"epoch": 7.94,
"learning_rate": 5.500000000000001e-06,
"loss": 3.4924,
"step": 89000
},
{
"epoch": 7.99,
"learning_rate": 5.25e-06,
"loss": 3.4954,
"step": 89500
},
{
"epoch": 8.03,
"learning_rate": 5e-06,
"loss": 3.4845,
"step": 90000
},
{
"epoch": 8.07,
"learning_rate": 4.75e-06,
"loss": 3.477,
"step": 90500
},
{
"epoch": 8.12,
"learning_rate": 4.5e-06,
"loss": 3.4813,
"step": 91000
},
{
"epoch": 8.16,
"learning_rate": 4.250000000000001e-06,
"loss": 3.4786,
"step": 91500
},
{
"epoch": 8.21,
"learning_rate": 4.000000000000001e-06,
"loss": 3.4809,
"step": 92000
},
{
"epoch": 8.25,
"learning_rate": 3.75e-06,
"loss": 3.4838,
"step": 92500
},
{
"epoch": 8.3,
"learning_rate": 3.5000000000000004e-06,
"loss": 3.4789,
"step": 93000
},
{
"epoch": 8.34,
"learning_rate": 3.2500000000000002e-06,
"loss": 3.4755,
"step": 93500
},
{
"epoch": 8.39,
"learning_rate": 3e-06,
"loss": 3.4772,
"step": 94000
},
{
"epoch": 8.43,
"learning_rate": 2.7500000000000004e-06,
"loss": 3.4663,
"step": 94500
},
{
"epoch": 8.48,
"learning_rate": 2.5e-06,
"loss": 3.4801,
"step": 95000
},
{
"epoch": 8.52,
"learning_rate": 2.25e-06,
"loss": 3.4619,
"step": 95500
},
{
"epoch": 8.57,
"learning_rate": 2.0000000000000003e-06,
"loss": 3.4689,
"step": 96000
},
{
"epoch": 8.61,
"learning_rate": 1.7500000000000002e-06,
"loss": 3.469,
"step": 96500
},
{
"epoch": 8.65,
"learning_rate": 1.5e-06,
"loss": 3.4666,
"step": 97000
},
{
"epoch": 8.7,
"learning_rate": 1.25e-06,
"loss": 3.463,
"step": 97500
},
{
"epoch": 8.74,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.4597,
"step": 98000
},
{
"epoch": 8.79,
"learning_rate": 7.5e-07,
"loss": 3.4758,
"step": 98500
},
{
"epoch": 8.83,
"learning_rate": 5.000000000000001e-07,
"loss": 3.463,
"step": 99000
},
{
"epoch": 8.88,
"learning_rate": 2.5000000000000004e-07,
"loss": 3.4609,
"step": 99500
},
{
"epoch": 8.92,
"learning_rate": 0.0,
"loss": 3.4677,
"step": 100000
}
],
"max_steps": 100000,
"num_train_epochs": 9,
"total_flos": 2.5251582638644695e+18,
"trial_name": null,
"trial_params": null
}
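
# A minimal sketch (not part of the original file) showing one way to read this
# trainer_state.json and plot the masked-language-modeling training-loss curve
# recorded in "log_history". It assumes the file sits in the current directory
# and that matplotlib is installed; the output filename is arbitrary.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Keep only entries that actually carry a loss value (evaluation entries,
# if any, may lack it).
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("roberta-base-latin-cased2 pre-training loss")
plt.savefig("loss_curve.png")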