colpali-hard-v1.1 / checkpoint-3500 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9516041326808048,
"eval_steps": 50,
"global_step": 3500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027188689505165853,
"grad_norm": 0.484375,
"learning_rate": 5e-06,
"loss": 0.7094,
"step": 10
},
{
"epoch": 0.005437737901033171,
"grad_norm": 0.609375,
"learning_rate": 1e-05,
"loss": 0.7117,
"step": 20
},
{
"epoch": 0.008156606851549755,
"grad_norm": 0.515625,
"learning_rate": 1.5e-05,
"loss": 0.7047,
"step": 30
},
{
"epoch": 0.010875475802066341,
"grad_norm": 0.353515625,
"learning_rate": 2e-05,
"loss": 0.7059,
"step": 40
},
{
"epoch": 0.013594344752582926,
"grad_norm": 0.43359375,
"learning_rate": 2.5e-05,
"loss": 0.7016,
"step": 50
},
{
"epoch": 0.013594344752582926,
"eval_loss": 0.6993125081062317,
"eval_runtime": 144.0611,
"eval_samples_per_second": 3.471,
"eval_steps_per_second": 0.111,
"step": 50
},
{
"epoch": 0.01631321370309951,
"grad_norm": 0.31640625,
"learning_rate": 3e-05,
"loss": 0.7023,
"step": 60
},
{
"epoch": 0.019032082653616094,
"grad_norm": 0.3125,
"learning_rate": 3.5e-05,
"loss": 0.6961,
"step": 70
},
{
"epoch": 0.021750951604132682,
"grad_norm": 0.357421875,
"learning_rate": 4e-05,
"loss": 0.6957,
"step": 80
},
{
"epoch": 0.024469820554649267,
"grad_norm": 0.3515625,
"learning_rate": 4.5e-05,
"loss": 0.6902,
"step": 90
},
{
"epoch": 0.027188689505165852,
"grad_norm": 0.5390625,
"learning_rate": 5e-05,
"loss": 0.6828,
"step": 100
},
{
"epoch": 0.027188689505165852,
"eval_loss": 0.6763437390327454,
"eval_runtime": 120.6338,
"eval_samples_per_second": 4.145,
"eval_steps_per_second": 0.133,
"step": 100
},
{
"epoch": 0.029907558455682437,
"grad_norm": 0.87890625,
"learning_rate": 4.9860257126886535e-05,
"loss": 0.6664,
"step": 110
},
{
"epoch": 0.03262642740619902,
"grad_norm": 1.125,
"learning_rate": 4.972051425377306e-05,
"loss": 0.6402,
"step": 120
},
{
"epoch": 0.03534529635671561,
"grad_norm": 1.7578125,
"learning_rate": 4.9580771380659594e-05,
"loss": 0.591,
"step": 130
},
{
"epoch": 0.03806416530723219,
"grad_norm": 1.4609375,
"learning_rate": 4.944102850754612e-05,
"loss": 0.5555,
"step": 140
},
{
"epoch": 0.040783034257748776,
"grad_norm": 2.71875,
"learning_rate": 4.9301285634432645e-05,
"loss": 0.5365,
"step": 150
},
{
"epoch": 0.040783034257748776,
"eval_loss": 0.5141562223434448,
"eval_runtime": 107.3892,
"eval_samples_per_second": 4.656,
"eval_steps_per_second": 0.149,
"step": 150
},
{
"epoch": 0.043501903208265365,
"grad_norm": 2.40625,
"learning_rate": 4.916154276131917e-05,
"loss": 0.4957,
"step": 160
},
{
"epoch": 0.046220772158781946,
"grad_norm": 2.5625,
"learning_rate": 4.9021799888205704e-05,
"loss": 0.4451,
"step": 170
},
{
"epoch": 0.048939641109298535,
"grad_norm": 2.28125,
"learning_rate": 4.888205701509223e-05,
"loss": 0.4641,
"step": 180
},
{
"epoch": 0.051658510059815116,
"grad_norm": 1.6875,
"learning_rate": 4.874231414197876e-05,
"loss": 0.434,
"step": 190
},
{
"epoch": 0.054377379010331704,
"grad_norm": 2.671875,
"learning_rate": 4.860257126886529e-05,
"loss": 0.3877,
"step": 200
},
{
"epoch": 0.054377379010331704,
"eval_loss": 0.41612499952316284,
"eval_runtime": 106.701,
"eval_samples_per_second": 4.686,
"eval_steps_per_second": 0.15,
"step": 200
},
{
"epoch": 0.057096247960848286,
"grad_norm": 2.015625,
"learning_rate": 4.846282839575182e-05,
"loss": 0.3903,
"step": 210
},
{
"epoch": 0.059815116911364874,
"grad_norm": 3.34375,
"learning_rate": 4.8323085522638347e-05,
"loss": 0.3504,
"step": 220
},
{
"epoch": 0.06253398586188146,
"grad_norm": 3.28125,
"learning_rate": 4.818334264952488e-05,
"loss": 0.3375,
"step": 230
},
{
"epoch": 0.06525285481239804,
"grad_norm": 2.203125,
"learning_rate": 4.8043599776411405e-05,
"loss": 0.3176,
"step": 240
},
{
"epoch": 0.06797172376291463,
"grad_norm": 2.78125,
"learning_rate": 4.790385690329793e-05,
"loss": 0.3045,
"step": 250
},
{
"epoch": 0.06797172376291463,
"eval_loss": 0.32106250524520874,
"eval_runtime": 105.1814,
"eval_samples_per_second": 4.754,
"eval_steps_per_second": 0.152,
"step": 250
},
{
"epoch": 0.07069059271343121,
"grad_norm": 1.5703125,
"learning_rate": 4.7764114030184464e-05,
"loss": 0.3239,
"step": 260
},
{
"epoch": 0.0734094616639478,
"grad_norm": 1.84375,
"learning_rate": 4.762437115707099e-05,
"loss": 0.2965,
"step": 270
},
{
"epoch": 0.07612833061446438,
"grad_norm": 3.203125,
"learning_rate": 4.748462828395752e-05,
"loss": 0.3126,
"step": 280
},
{
"epoch": 0.07884719956498097,
"grad_norm": 2.5625,
"learning_rate": 4.734488541084405e-05,
"loss": 0.2654,
"step": 290
},
{
"epoch": 0.08156606851549755,
"grad_norm": 2.03125,
"learning_rate": 4.720514253773058e-05,
"loss": 0.3025,
"step": 300
},
{
"epoch": 0.08156606851549755,
"eval_loss": 0.2882343828678131,
"eval_runtime": 101.5553,
"eval_samples_per_second": 4.923,
"eval_steps_per_second": 0.158,
"step": 300
},
{
"epoch": 0.08428493746601413,
"grad_norm": 2.296875,
"learning_rate": 4.7065399664617106e-05,
"loss": 0.2987,
"step": 310
},
{
"epoch": 0.08700380641653073,
"grad_norm": 1.296875,
"learning_rate": 4.692565679150364e-05,
"loss": 0.2868,
"step": 320
},
{
"epoch": 0.08972267536704731,
"grad_norm": 2.40625,
"learning_rate": 4.6785913918390165e-05,
"loss": 0.3149,
"step": 330
},
{
"epoch": 0.09244154431756389,
"grad_norm": 2.015625,
"learning_rate": 4.664617104527669e-05,
"loss": 0.231,
"step": 340
},
{
"epoch": 0.09516041326808047,
"grad_norm": 1.9140625,
"learning_rate": 4.650642817216322e-05,
"loss": 0.2977,
"step": 350
},
{
"epoch": 0.09516041326808047,
"eval_loss": 0.2687968611717224,
"eval_runtime": 135.1857,
"eval_samples_per_second": 3.699,
"eval_steps_per_second": 0.118,
"step": 350
},
{
"epoch": 0.09787928221859707,
"grad_norm": 1.640625,
"learning_rate": 4.636668529904975e-05,
"loss": 0.2553,
"step": 360
},
{
"epoch": 0.10059815116911365,
"grad_norm": 2.171875,
"learning_rate": 4.622694242593628e-05,
"loss": 0.2471,
"step": 370
},
{
"epoch": 0.10331702011963023,
"grad_norm": 1.8203125,
"learning_rate": 4.608719955282281e-05,
"loss": 0.2753,
"step": 380
},
{
"epoch": 0.10603588907014681,
"grad_norm": 2.09375,
"learning_rate": 4.594745667970934e-05,
"loss": 0.2913,
"step": 390
},
{
"epoch": 0.10875475802066341,
"grad_norm": 1.8515625,
"learning_rate": 4.5807713806595866e-05,
"loss": 0.2674,
"step": 400
},
{
"epoch": 0.10875475802066341,
"eval_loss": 0.25398436188697815,
"eval_runtime": 111.6096,
"eval_samples_per_second": 4.48,
"eval_steps_per_second": 0.143,
"step": 400
},
{
"epoch": 0.11147362697117999,
"grad_norm": 1.8828125,
"learning_rate": 4.56679709334824e-05,
"loss": 0.2763,
"step": 410
},
{
"epoch": 0.11419249592169657,
"grad_norm": 3.125,
"learning_rate": 4.5528228060368925e-05,
"loss": 0.2797,
"step": 420
},
{
"epoch": 0.11691136487221315,
"grad_norm": 1.5703125,
"learning_rate": 4.538848518725545e-05,
"loss": 0.252,
"step": 430
},
{
"epoch": 0.11963023382272975,
"grad_norm": 2.734375,
"learning_rate": 4.5248742314141976e-05,
"loss": 0.3126,
"step": 440
},
{
"epoch": 0.12234910277324633,
"grad_norm": 2.234375,
"learning_rate": 4.510899944102851e-05,
"loss": 0.2595,
"step": 450
},
{
"epoch": 0.12234910277324633,
"eval_loss": 0.2485624998807907,
"eval_runtime": 117.7724,
"eval_samples_per_second": 4.245,
"eval_steps_per_second": 0.136,
"step": 450
},
{
"epoch": 0.1250679717237629,
"grad_norm": 1.7734375,
"learning_rate": 4.4969256567915035e-05,
"loss": 0.2341,
"step": 460
},
{
"epoch": 0.1277868406742795,
"grad_norm": 2.15625,
"learning_rate": 4.482951369480157e-05,
"loss": 0.2337,
"step": 470
},
{
"epoch": 0.13050570962479607,
"grad_norm": 1.1875,
"learning_rate": 4.46897708216881e-05,
"loss": 0.2757,
"step": 480
},
{
"epoch": 0.13322457857531267,
"grad_norm": 3.390625,
"learning_rate": 4.4550027948574626e-05,
"loss": 0.2219,
"step": 490
},
{
"epoch": 0.13594344752582926,
"grad_norm": 2.34375,
"learning_rate": 4.441028507546116e-05,
"loss": 0.2371,
"step": 500
},
{
"epoch": 0.13594344752582926,
"eval_loss": 0.23534375429153442,
"eval_runtime": 104.6381,
"eval_samples_per_second": 4.778,
"eval_steps_per_second": 0.153,
"step": 500
},
{
"epoch": 0.13866231647634583,
"grad_norm": 3.984375,
"learning_rate": 4.4270542202347684e-05,
"loss": 0.2367,
"step": 510
},
{
"epoch": 0.14138118542686243,
"grad_norm": 1.8515625,
"learning_rate": 4.413079932923421e-05,
"loss": 0.2249,
"step": 520
},
{
"epoch": 0.14410005437737902,
"grad_norm": 1.6484375,
"learning_rate": 4.3991056456120736e-05,
"loss": 0.2861,
"step": 530
},
{
"epoch": 0.1468189233278956,
"grad_norm": 1.1953125,
"learning_rate": 4.385131358300727e-05,
"loss": 0.2504,
"step": 540
},
{
"epoch": 0.14953779227841218,
"grad_norm": 2.03125,
"learning_rate": 4.3711570709893795e-05,
"loss": 0.2458,
"step": 550
},
{
"epoch": 0.14953779227841218,
"eval_loss": 0.2247031182050705,
"eval_runtime": 110.8821,
"eval_samples_per_second": 4.509,
"eval_steps_per_second": 0.144,
"step": 550
},
{
"epoch": 0.15225666122892875,
"grad_norm": 4.875,
"learning_rate": 4.357182783678033e-05,
"loss": 0.2205,
"step": 560
},
{
"epoch": 0.15497553017944535,
"grad_norm": 1.8828125,
"learning_rate": 4.343208496366685e-05,
"loss": 0.2121,
"step": 570
},
{
"epoch": 0.15769439912996194,
"grad_norm": 2.671875,
"learning_rate": 4.3292342090553386e-05,
"loss": 0.2286,
"step": 580
},
{
"epoch": 0.1604132680804785,
"grad_norm": 1.8828125,
"learning_rate": 4.315259921743992e-05,
"loss": 0.2234,
"step": 590
},
{
"epoch": 0.1631321370309951,
"grad_norm": 2.609375,
"learning_rate": 4.3012856344326444e-05,
"loss": 0.2261,
"step": 600
},
{
"epoch": 0.1631321370309951,
"eval_loss": 0.22798436880111694,
"eval_runtime": 102.5588,
"eval_samples_per_second": 4.875,
"eval_steps_per_second": 0.156,
"step": 600
},
{
"epoch": 0.1658510059815117,
"grad_norm": 2.75,
"learning_rate": 4.287311347121297e-05,
"loss": 0.1937,
"step": 610
},
{
"epoch": 0.16856987493202827,
"grad_norm": 2.921875,
"learning_rate": 4.2733370598099496e-05,
"loss": 0.2187,
"step": 620
},
{
"epoch": 0.17128874388254486,
"grad_norm": 2.453125,
"learning_rate": 4.259362772498603e-05,
"loss": 0.2353,
"step": 630
},
{
"epoch": 0.17400761283306146,
"grad_norm": 2.671875,
"learning_rate": 4.2453884851872554e-05,
"loss": 0.2376,
"step": 640
},
{
"epoch": 0.17672648178357803,
"grad_norm": 1.8828125,
"learning_rate": 4.231414197875909e-05,
"loss": 0.2688,
"step": 650
},
{
"epoch": 0.17672648178357803,
"eval_loss": 0.21031250059604645,
"eval_runtime": 102.6274,
"eval_samples_per_second": 4.872,
"eval_steps_per_second": 0.156,
"step": 650
},
{
"epoch": 0.17944535073409462,
"grad_norm": 1.34375,
"learning_rate": 4.217439910564561e-05,
"loss": 0.2137,
"step": 660
},
{
"epoch": 0.1821642196846112,
"grad_norm": 1.59375,
"learning_rate": 4.2034656232532146e-05,
"loss": 0.245,
"step": 670
},
{
"epoch": 0.18488308863512778,
"grad_norm": 2.140625,
"learning_rate": 4.189491335941867e-05,
"loss": 0.2625,
"step": 680
},
{
"epoch": 0.18760195758564438,
"grad_norm": 1.8203125,
"learning_rate": 4.17551704863052e-05,
"loss": 0.2325,
"step": 690
},
{
"epoch": 0.19032082653616095,
"grad_norm": 2.9375,
"learning_rate": 4.161542761319172e-05,
"loss": 0.2058,
"step": 700
},
{
"epoch": 0.19032082653616095,
"eval_loss": 0.20640625059604645,
"eval_runtime": 103.0629,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.155,
"step": 700
},
{
"epoch": 0.19303969548667754,
"grad_norm": 1.328125,
"learning_rate": 4.1475684740078256e-05,
"loss": 0.222,
"step": 710
},
{
"epoch": 0.19575856443719414,
"grad_norm": 1.359375,
"learning_rate": 4.133594186696479e-05,
"loss": 0.2537,
"step": 720
},
{
"epoch": 0.1984774333877107,
"grad_norm": 1.1875,
"learning_rate": 4.1196198993851314e-05,
"loss": 0.2147,
"step": 730
},
{
"epoch": 0.2011963023382273,
"grad_norm": 2.09375,
"learning_rate": 4.105645612073785e-05,
"loss": 0.1797,
"step": 740
},
{
"epoch": 0.2039151712887439,
"grad_norm": 1.2109375,
"learning_rate": 4.091671324762437e-05,
"loss": 0.202,
"step": 750
},
{
"epoch": 0.2039151712887439,
"eval_loss": 0.1913750022649765,
"eval_runtime": 103.5861,
"eval_samples_per_second": 4.827,
"eval_steps_per_second": 0.154,
"step": 750
},
{
"epoch": 0.20663404023926046,
"grad_norm": 2.328125,
"learning_rate": 4.0776970374510905e-05,
"loss": 0.2025,
"step": 760
},
{
"epoch": 0.20935290918977706,
"grad_norm": 0.828125,
"learning_rate": 4.063722750139743e-05,
"loss": 0.2231,
"step": 770
},
{
"epoch": 0.21207177814029363,
"grad_norm": 2.078125,
"learning_rate": 4.049748462828396e-05,
"loss": 0.2158,
"step": 780
},
{
"epoch": 0.21479064709081022,
"grad_norm": 1.890625,
"learning_rate": 4.035774175517048e-05,
"loss": 0.2059,
"step": 790
},
{
"epoch": 0.21750951604132682,
"grad_norm": 0.984375,
"learning_rate": 4.0217998882057016e-05,
"loss": 0.1792,
"step": 800
},
{
"epoch": 0.21750951604132682,
"eval_loss": 0.20859375596046448,
"eval_runtime": 99.6459,
"eval_samples_per_second": 5.018,
"eval_steps_per_second": 0.161,
"step": 800
},
{
"epoch": 0.22022838499184338,
"grad_norm": 1.765625,
"learning_rate": 4.007825600894354e-05,
"loss": 0.2092,
"step": 810
},
{
"epoch": 0.22294725394235998,
"grad_norm": 1.7265625,
"learning_rate": 3.9938513135830074e-05,
"loss": 0.184,
"step": 820
},
{
"epoch": 0.22566612289287658,
"grad_norm": 1.578125,
"learning_rate": 3.979877026271661e-05,
"loss": 0.1796,
"step": 830
},
{
"epoch": 0.22838499184339314,
"grad_norm": 2.015625,
"learning_rate": 3.965902738960313e-05,
"loss": 0.219,
"step": 840
},
{
"epoch": 0.23110386079390974,
"grad_norm": 1.078125,
"learning_rate": 3.9519284516489665e-05,
"loss": 0.21,
"step": 850
},
{
"epoch": 0.23110386079390974,
"eval_loss": 0.18590624630451202,
"eval_runtime": 100.5192,
"eval_samples_per_second": 4.974,
"eval_steps_per_second": 0.159,
"step": 850
},
{
"epoch": 0.2338227297444263,
"grad_norm": 1.1484375,
"learning_rate": 3.937954164337619e-05,
"loss": 0.1995,
"step": 860
},
{
"epoch": 0.2365415986949429,
"grad_norm": 2.375,
"learning_rate": 3.923979877026272e-05,
"loss": 0.2072,
"step": 870
},
{
"epoch": 0.2392604676454595,
"grad_norm": 2.0,
"learning_rate": 3.910005589714924e-05,
"loss": 0.1809,
"step": 880
},
{
"epoch": 0.24197933659597606,
"grad_norm": 3.203125,
"learning_rate": 3.8960313024035775e-05,
"loss": 0.2188,
"step": 890
},
{
"epoch": 0.24469820554649266,
"grad_norm": 1.8828125,
"learning_rate": 3.88205701509223e-05,
"loss": 0.2067,
"step": 900
},
{
"epoch": 0.24469820554649266,
"eval_loss": 0.18979687988758087,
"eval_runtime": 109.9141,
"eval_samples_per_second": 4.549,
"eval_steps_per_second": 0.146,
"step": 900
},
{
"epoch": 0.24741707449700925,
"grad_norm": 2.421875,
"learning_rate": 3.8680827277808834e-05,
"loss": 0.2491,
"step": 910
},
{
"epoch": 0.2501359434475258,
"grad_norm": 3.015625,
"learning_rate": 3.854108440469536e-05,
"loss": 0.1773,
"step": 920
},
{
"epoch": 0.2528548123980424,
"grad_norm": 1.6953125,
"learning_rate": 3.840134153158189e-05,
"loss": 0.1618,
"step": 930
},
{
"epoch": 0.255573681348559,
"grad_norm": 1.9296875,
"learning_rate": 3.826159865846842e-05,
"loss": 0.2098,
"step": 940
},
{
"epoch": 0.2582925502990756,
"grad_norm": 3.625,
"learning_rate": 3.812185578535495e-05,
"loss": 0.2236,
"step": 950
},
{
"epoch": 0.2582925502990756,
"eval_loss": 0.18760937452316284,
"eval_runtime": 105.2992,
"eval_samples_per_second": 4.748,
"eval_steps_per_second": 0.152,
"step": 950
},
{
"epoch": 0.26101141924959215,
"grad_norm": 1.6328125,
"learning_rate": 3.798211291224148e-05,
"loss": 0.228,
"step": 960
},
{
"epoch": 0.26373028820010874,
"grad_norm": 1.8515625,
"learning_rate": 3.7842370039128e-05,
"loss": 0.1924,
"step": 970
},
{
"epoch": 0.26644915715062534,
"grad_norm": 1.2421875,
"learning_rate": 3.7702627166014535e-05,
"loss": 0.1952,
"step": 980
},
{
"epoch": 0.26916802610114193,
"grad_norm": 2.140625,
"learning_rate": 3.756288429290106e-05,
"loss": 0.226,
"step": 990
},
{
"epoch": 0.27188689505165853,
"grad_norm": 1.3671875,
"learning_rate": 3.7423141419787594e-05,
"loss": 0.2095,
"step": 1000
},
{
"epoch": 0.27188689505165853,
"eval_loss": 0.19670312106609344,
"eval_runtime": 100.0411,
"eval_samples_per_second": 4.998,
"eval_steps_per_second": 0.16,
"step": 1000
},
{
"epoch": 0.27460576400217507,
"grad_norm": 1.4765625,
"learning_rate": 3.728339854667412e-05,
"loss": 0.197,
"step": 1010
},
{
"epoch": 0.27732463295269166,
"grad_norm": 1.703125,
"learning_rate": 3.714365567356065e-05,
"loss": 0.1979,
"step": 1020
},
{
"epoch": 0.28004350190320826,
"grad_norm": 1.9375,
"learning_rate": 3.700391280044718e-05,
"loss": 0.2003,
"step": 1030
},
{
"epoch": 0.28276237085372485,
"grad_norm": 1.6953125,
"learning_rate": 3.686416992733371e-05,
"loss": 0.1868,
"step": 1040
},
{
"epoch": 0.28548123980424145,
"grad_norm": 1.53125,
"learning_rate": 3.6724427054220237e-05,
"loss": 0.198,
"step": 1050
},
{
"epoch": 0.28548123980424145,
"eval_loss": 0.19362500309944153,
"eval_runtime": 100.7072,
"eval_samples_per_second": 4.965,
"eval_steps_per_second": 0.159,
"step": 1050
},
{
"epoch": 0.28820010875475804,
"grad_norm": 1.640625,
"learning_rate": 3.658468418110676e-05,
"loss": 0.2059,
"step": 1060
},
{
"epoch": 0.2909189777052746,
"grad_norm": 1.1640625,
"learning_rate": 3.6444941307993295e-05,
"loss": 0.1568,
"step": 1070
},
{
"epoch": 0.2936378466557912,
"grad_norm": 1.1328125,
"learning_rate": 3.630519843487982e-05,
"loss": 0.1812,
"step": 1080
},
{
"epoch": 0.2963567156063078,
"grad_norm": 2.1875,
"learning_rate": 3.6165455561766354e-05,
"loss": 0.1819,
"step": 1090
},
{
"epoch": 0.29907558455682437,
"grad_norm": 1.546875,
"learning_rate": 3.602571268865288e-05,
"loss": 0.1739,
"step": 1100
},
{
"epoch": 0.29907558455682437,
"eval_loss": 0.18107812106609344,
"eval_runtime": 101.4517,
"eval_samples_per_second": 4.928,
"eval_steps_per_second": 0.158,
"step": 1100
},
{
"epoch": 0.30179445350734097,
"grad_norm": 1.15625,
"learning_rate": 3.588596981553941e-05,
"loss": 0.2062,
"step": 1110
},
{
"epoch": 0.3045133224578575,
"grad_norm": 2.0,
"learning_rate": 3.574622694242594e-05,
"loss": 0.2333,
"step": 1120
},
{
"epoch": 0.3072321914083741,
"grad_norm": 1.7734375,
"learning_rate": 3.560648406931247e-05,
"loss": 0.1878,
"step": 1130
},
{
"epoch": 0.3099510603588907,
"grad_norm": 2.515625,
"learning_rate": 3.5466741196198996e-05,
"loss": 0.2058,
"step": 1140
},
{
"epoch": 0.3126699293094073,
"grad_norm": 2.09375,
"learning_rate": 3.532699832308552e-05,
"loss": 0.206,
"step": 1150
},
{
"epoch": 0.3126699293094073,
"eval_loss": 0.182679682970047,
"eval_runtime": 99.428,
"eval_samples_per_second": 5.029,
"eval_steps_per_second": 0.161,
"step": 1150
},
{
"epoch": 0.3153887982599239,
"grad_norm": 1.3125,
"learning_rate": 3.518725544997205e-05,
"loss": 0.2013,
"step": 1160
},
{
"epoch": 0.3181076672104405,
"grad_norm": 1.75,
"learning_rate": 3.504751257685858e-05,
"loss": 0.2018,
"step": 1170
},
{
"epoch": 0.320826536160957,
"grad_norm": 1.265625,
"learning_rate": 3.4907769703745107e-05,
"loss": 0.2129,
"step": 1180
},
{
"epoch": 0.3235454051114736,
"grad_norm": 1.984375,
"learning_rate": 3.476802683063164e-05,
"loss": 0.1912,
"step": 1190
},
{
"epoch": 0.3262642740619902,
"grad_norm": 2.171875,
"learning_rate": 3.462828395751817e-05,
"loss": 0.1822,
"step": 1200
},
{
"epoch": 0.3262642740619902,
"eval_loss": 0.18774999678134918,
"eval_runtime": 99.1074,
"eval_samples_per_second": 5.045,
"eval_steps_per_second": 0.161,
"step": 1200
},
{
"epoch": 0.3289831430125068,
"grad_norm": 4.5625,
"learning_rate": 3.44885410844047e-05,
"loss": 0.1758,
"step": 1210
},
{
"epoch": 0.3317020119630234,
"grad_norm": 1.9921875,
"learning_rate": 3.434879821129123e-05,
"loss": 0.2345,
"step": 1220
},
{
"epoch": 0.33442088091353994,
"grad_norm": 1.3359375,
"learning_rate": 3.4209055338177756e-05,
"loss": 0.2056,
"step": 1230
},
{
"epoch": 0.33713974986405654,
"grad_norm": 1.8515625,
"learning_rate": 3.406931246506428e-05,
"loss": 0.2017,
"step": 1240
},
{
"epoch": 0.33985861881457313,
"grad_norm": 1.9375,
"learning_rate": 3.392956959195081e-05,
"loss": 0.1789,
"step": 1250
},
{
"epoch": 0.33985861881457313,
"eval_loss": 0.18862499296665192,
"eval_runtime": 100.4085,
"eval_samples_per_second": 4.98,
"eval_steps_per_second": 0.159,
"step": 1250
},
{
"epoch": 0.3425774877650897,
"grad_norm": 1.3046875,
"learning_rate": 3.378982671883734e-05,
"loss": 0.1966,
"step": 1260
},
{
"epoch": 0.3452963567156063,
"grad_norm": 2.234375,
"learning_rate": 3.3650083845723866e-05,
"loss": 0.201,
"step": 1270
},
{
"epoch": 0.3480152256661229,
"grad_norm": 1.375,
"learning_rate": 3.35103409726104e-05,
"loss": 0.186,
"step": 1280
},
{
"epoch": 0.35073409461663946,
"grad_norm": 2.3125,
"learning_rate": 3.3370598099496925e-05,
"loss": 0.1931,
"step": 1290
},
{
"epoch": 0.35345296356715605,
"grad_norm": 1.6015625,
"learning_rate": 3.323085522638346e-05,
"loss": 0.1633,
"step": 1300
},
{
"epoch": 0.35345296356715605,
"eval_loss": 0.17839062213897705,
"eval_runtime": 97.1074,
"eval_samples_per_second": 5.149,
"eval_steps_per_second": 0.165,
"step": 1300
},
{
"epoch": 0.35617183251767265,
"grad_norm": 1.4453125,
"learning_rate": 3.309111235326999e-05,
"loss": 0.1765,
"step": 1310
},
{
"epoch": 0.35889070146818924,
"grad_norm": 3.640625,
"learning_rate": 3.2951369480156516e-05,
"loss": 0.1793,
"step": 1320
},
{
"epoch": 0.36160957041870584,
"grad_norm": 1.34375,
"learning_rate": 3.281162660704304e-05,
"loss": 0.1976,
"step": 1330
},
{
"epoch": 0.3643284393692224,
"grad_norm": 1.5,
"learning_rate": 3.267188373392957e-05,
"loss": 0.2001,
"step": 1340
},
{
"epoch": 0.367047308319739,
"grad_norm": 0.9140625,
"learning_rate": 3.25321408608161e-05,
"loss": 0.1723,
"step": 1350
},
{
"epoch": 0.367047308319739,
"eval_loss": 0.17289061844348907,
"eval_runtime": 97.8393,
"eval_samples_per_second": 5.11,
"eval_steps_per_second": 0.164,
"step": 1350
},
{
"epoch": 0.36976617727025557,
"grad_norm": 1.734375,
"learning_rate": 3.2392397987702626e-05,
"loss": 0.2236,
"step": 1360
},
{
"epoch": 0.37248504622077216,
"grad_norm": 1.2109375,
"learning_rate": 3.225265511458916e-05,
"loss": 0.1914,
"step": 1370
},
{
"epoch": 0.37520391517128876,
"grad_norm": 2.828125,
"learning_rate": 3.2112912241475685e-05,
"loss": 0.1715,
"step": 1380
},
{
"epoch": 0.37792278412180536,
"grad_norm": 2.265625,
"learning_rate": 3.197316936836222e-05,
"loss": 0.1694,
"step": 1390
},
{
"epoch": 0.3806416530723219,
"grad_norm": 1.671875,
"learning_rate": 3.183342649524874e-05,
"loss": 0.1828,
"step": 1400
},
{
"epoch": 0.3806416530723219,
"eval_loss": 0.1787421852350235,
"eval_runtime": 105.4565,
"eval_samples_per_second": 4.741,
"eval_steps_per_second": 0.152,
"step": 1400
},
{
"epoch": 0.3833605220228385,
"grad_norm": 1.4296875,
"learning_rate": 3.1693683622135276e-05,
"loss": 0.224,
"step": 1410
},
{
"epoch": 0.3860793909733551,
"grad_norm": 2.484375,
"learning_rate": 3.15539407490218e-05,
"loss": 0.1992,
"step": 1420
},
{
"epoch": 0.3887982599238717,
"grad_norm": 3.0625,
"learning_rate": 3.141419787590833e-05,
"loss": 0.2106,
"step": 1430
},
{
"epoch": 0.3915171288743883,
"grad_norm": 1.1875,
"learning_rate": 3.127445500279486e-05,
"loss": 0.1732,
"step": 1440
},
{
"epoch": 0.3942359978249048,
"grad_norm": 1.5625,
"learning_rate": 3.1134712129681386e-05,
"loss": 0.1865,
"step": 1450
},
{
"epoch": 0.3942359978249048,
"eval_loss": 0.1751093715429306,
"eval_runtime": 97.1038,
"eval_samples_per_second": 5.149,
"eval_steps_per_second": 0.165,
"step": 1450
},
{
"epoch": 0.3969548667754214,
"grad_norm": 1.8515625,
"learning_rate": 3.099496925656792e-05,
"loss": 0.2113,
"step": 1460
},
{
"epoch": 0.399673735725938,
"grad_norm": 1.453125,
"learning_rate": 3.0855226383454444e-05,
"loss": 0.2173,
"step": 1470
},
{
"epoch": 0.4023926046764546,
"grad_norm": 2.046875,
"learning_rate": 3.071548351034098e-05,
"loss": 0.1893,
"step": 1480
},
{
"epoch": 0.4051114736269712,
"grad_norm": 2.0,
"learning_rate": 3.05757406372275e-05,
"loss": 0.1885,
"step": 1490
},
{
"epoch": 0.4078303425774878,
"grad_norm": 1.4140625,
"learning_rate": 3.0435997764114032e-05,
"loss": 0.1642,
"step": 1500
},
{
"epoch": 0.4078303425774878,
"eval_loss": 0.1752343773841858,
"eval_runtime": 97.2789,
"eval_samples_per_second": 5.14,
"eval_steps_per_second": 0.164,
"step": 1500
},
{
"epoch": 0.41054921152800433,
"grad_norm": 2.359375,
"learning_rate": 3.0296254891000558e-05,
"loss": 0.189,
"step": 1510
},
{
"epoch": 0.4132680804785209,
"grad_norm": 1.5546875,
"learning_rate": 3.015651201788709e-05,
"loss": 0.1399,
"step": 1520
},
{
"epoch": 0.4159869494290375,
"grad_norm": 0.9921875,
"learning_rate": 3.0016769144773617e-05,
"loss": 0.2088,
"step": 1530
},
{
"epoch": 0.4187058183795541,
"grad_norm": 1.7421875,
"learning_rate": 2.9877026271660146e-05,
"loss": 0.2048,
"step": 1540
},
{
"epoch": 0.4214246873300707,
"grad_norm": 1.1640625,
"learning_rate": 2.973728339854668e-05,
"loss": 0.1898,
"step": 1550
},
{
"epoch": 0.4214246873300707,
"eval_loss": 0.17342187464237213,
"eval_runtime": 95.7506,
"eval_samples_per_second": 5.222,
"eval_steps_per_second": 0.167,
"step": 1550
},
{
"epoch": 0.42414355628058725,
"grad_norm": 2.71875,
"learning_rate": 2.9597540525433204e-05,
"loss": 0.2081,
"step": 1560
},
{
"epoch": 0.42686242523110385,
"grad_norm": 2.34375,
"learning_rate": 2.9457797652319734e-05,
"loss": 0.1575,
"step": 1570
},
{
"epoch": 0.42958129418162044,
"grad_norm": 3.21875,
"learning_rate": 2.931805477920626e-05,
"loss": 0.1849,
"step": 1580
},
{
"epoch": 0.43230016313213704,
"grad_norm": 1.7421875,
"learning_rate": 2.9178311906092792e-05,
"loss": 0.2059,
"step": 1590
},
{
"epoch": 0.43501903208265363,
"grad_norm": 1.2421875,
"learning_rate": 2.9038569032979318e-05,
"loss": 0.1641,
"step": 1600
},
{
"epoch": 0.43501903208265363,
"eval_loss": 0.1657109409570694,
"eval_runtime": 96.6429,
"eval_samples_per_second": 5.174,
"eval_steps_per_second": 0.166,
"step": 1600
},
{
"epoch": 0.4377379010331702,
"grad_norm": 1.953125,
"learning_rate": 2.889882615986585e-05,
"loss": 0.1906,
"step": 1610
},
{
"epoch": 0.44045676998368677,
"grad_norm": 1.6328125,
"learning_rate": 2.8759083286752376e-05,
"loss": 0.1907,
"step": 1620
},
{
"epoch": 0.44317563893420336,
"grad_norm": 4.625,
"learning_rate": 2.8619340413638906e-05,
"loss": 0.2193,
"step": 1630
},
{
"epoch": 0.44589450788471996,
"grad_norm": 1.515625,
"learning_rate": 2.847959754052543e-05,
"loss": 0.1714,
"step": 1640
},
{
"epoch": 0.44861337683523655,
"grad_norm": 1.8125,
"learning_rate": 2.8339854667411964e-05,
"loss": 0.1771,
"step": 1650
},
{
"epoch": 0.44861337683523655,
"eval_loss": 0.18095313012599945,
"eval_runtime": 95.4318,
"eval_samples_per_second": 5.239,
"eval_steps_per_second": 0.168,
"step": 1650
},
{
"epoch": 0.45133224578575315,
"grad_norm": 1.9296875,
"learning_rate": 2.820011179429849e-05,
"loss": 0.1917,
"step": 1660
},
{
"epoch": 0.4540511147362697,
"grad_norm": 1.8046875,
"learning_rate": 2.806036892118502e-05,
"loss": 0.1915,
"step": 1670
},
{
"epoch": 0.4567699836867863,
"grad_norm": 2.53125,
"learning_rate": 2.7920626048071552e-05,
"loss": 0.1436,
"step": 1680
},
{
"epoch": 0.4594888526373029,
"grad_norm": 0.921875,
"learning_rate": 2.7780883174958078e-05,
"loss": 0.1683,
"step": 1690
},
{
"epoch": 0.4622077215878195,
"grad_norm": 1.640625,
"learning_rate": 2.764114030184461e-05,
"loss": 0.1762,
"step": 1700
},
{
"epoch": 0.4622077215878195,
"eval_loss": 0.1666562557220459,
"eval_runtime": 96.3207,
"eval_samples_per_second": 5.191,
"eval_steps_per_second": 0.166,
"step": 1700
},
{
"epoch": 0.46492659053833607,
"grad_norm": 2.484375,
"learning_rate": 2.7501397428731136e-05,
"loss": 0.1977,
"step": 1710
},
{
"epoch": 0.4676454594888526,
"grad_norm": 2.171875,
"learning_rate": 2.7361654555617665e-05,
"loss": 0.2066,
"step": 1720
},
{
"epoch": 0.4703643284393692,
"grad_norm": 2.5,
"learning_rate": 2.722191168250419e-05,
"loss": 0.1756,
"step": 1730
},
{
"epoch": 0.4730831973898858,
"grad_norm": 1.5625,
"learning_rate": 2.7082168809390724e-05,
"loss": 0.1871,
"step": 1740
},
{
"epoch": 0.4758020663404024,
"grad_norm": 1.8046875,
"learning_rate": 2.694242593627725e-05,
"loss": 0.2212,
"step": 1750
},
{
"epoch": 0.4758020663404024,
"eval_loss": 0.15598437190055847,
"eval_runtime": 94.5918,
"eval_samples_per_second": 5.286,
"eval_steps_per_second": 0.169,
"step": 1750
},
{
"epoch": 0.478520935290919,
"grad_norm": 2.1875,
"learning_rate": 2.680268306316378e-05,
"loss": 0.1998,
"step": 1760
},
{
"epoch": 0.4812398042414356,
"grad_norm": 2.046875,
"learning_rate": 2.6662940190050305e-05,
"loss": 0.1886,
"step": 1770
},
{
"epoch": 0.4839586731919521,
"grad_norm": 1.5390625,
"learning_rate": 2.6523197316936838e-05,
"loss": 0.1857,
"step": 1780
},
{
"epoch": 0.4866775421424687,
"grad_norm": 1.2421875,
"learning_rate": 2.638345444382337e-05,
"loss": 0.1601,
"step": 1790
},
{
"epoch": 0.4893964110929853,
"grad_norm": 1.3984375,
"learning_rate": 2.6243711570709896e-05,
"loss": 0.1989,
"step": 1800
},
{
"epoch": 0.4893964110929853,
"eval_loss": 0.17681249976158142,
"eval_runtime": 97.019,
"eval_samples_per_second": 5.154,
"eval_steps_per_second": 0.165,
"step": 1800
},
{
"epoch": 0.4921152800435019,
"grad_norm": 0.94140625,
"learning_rate": 2.6103968697596425e-05,
"loss": 0.1391,
"step": 1810
},
{
"epoch": 0.4948341489940185,
"grad_norm": 2.765625,
"learning_rate": 2.596422582448295e-05,
"loss": 0.211,
"step": 1820
},
{
"epoch": 0.49755301794453505,
"grad_norm": 2.859375,
"learning_rate": 2.5824482951369484e-05,
"loss": 0.1558,
"step": 1830
},
{
"epoch": 0.5002718868950516,
"grad_norm": 2.796875,
"learning_rate": 2.568474007825601e-05,
"loss": 0.1764,
"step": 1840
},
{
"epoch": 0.5029907558455683,
"grad_norm": 1.609375,
"learning_rate": 2.554499720514254e-05,
"loss": 0.1547,
"step": 1850
},
{
"epoch": 0.5029907558455683,
"eval_loss": 0.1746249943971634,
"eval_runtime": 95.3118,
"eval_samples_per_second": 5.246,
"eval_steps_per_second": 0.168,
"step": 1850
},
{
"epoch": 0.5057096247960848,
"grad_norm": 2.1875,
"learning_rate": 2.5405254332029065e-05,
"loss": 0.1657,
"step": 1860
},
{
"epoch": 0.5084284937466014,
"grad_norm": 1.828125,
"learning_rate": 2.5265511458915597e-05,
"loss": 0.1985,
"step": 1870
},
{
"epoch": 0.511147362697118,
"grad_norm": 1.1875,
"learning_rate": 2.5125768585802123e-05,
"loss": 0.1828,
"step": 1880
},
{
"epoch": 0.5138662316476346,
"grad_norm": 1.1953125,
"learning_rate": 2.4986025712688656e-05,
"loss": 0.1603,
"step": 1890
},
{
"epoch": 0.5165851005981512,
"grad_norm": 1.8203125,
"learning_rate": 2.484628283957518e-05,
"loss": 0.1718,
"step": 1900
},
{
"epoch": 0.5165851005981512,
"eval_loss": 0.1659843772649765,
"eval_runtime": 117.6794,
"eval_samples_per_second": 4.249,
"eval_steps_per_second": 0.136,
"step": 1900
},
{
"epoch": 0.5193039695486678,
"grad_norm": 0.7421875,
"learning_rate": 2.470653996646171e-05,
"loss": 0.1812,
"step": 1910
},
{
"epoch": 0.5220228384991843,
"grad_norm": 1.4375,
"learning_rate": 2.456679709334824e-05,
"loss": 0.1624,
"step": 1920
},
{
"epoch": 0.524741707449701,
"grad_norm": 1.90625,
"learning_rate": 2.442705422023477e-05,
"loss": 0.1344,
"step": 1930
},
{
"epoch": 0.5274605764002175,
"grad_norm": 1.6015625,
"learning_rate": 2.42873113471213e-05,
"loss": 0.1823,
"step": 1940
},
{
"epoch": 0.5301794453507341,
"grad_norm": 2.515625,
"learning_rate": 2.4147568474007825e-05,
"loss": 0.1742,
"step": 1950
},
{
"epoch": 0.5301794453507341,
"eval_loss": 0.1723593771457672,
"eval_runtime": 104.5426,
"eval_samples_per_second": 4.783,
"eval_steps_per_second": 0.153,
"step": 1950
},
{
"epoch": 0.5328983143012507,
"grad_norm": 4.375,
"learning_rate": 2.4007825600894354e-05,
"loss": 0.2232,
"step": 1960
},
{
"epoch": 0.5356171832517672,
"grad_norm": 1.6640625,
"learning_rate": 2.3868082727780886e-05,
"loss": 0.1532,
"step": 1970
},
{
"epoch": 0.5383360522022839,
"grad_norm": 2.265625,
"learning_rate": 2.3728339854667416e-05,
"loss": 0.171,
"step": 1980
},
{
"epoch": 0.5410549211528004,
"grad_norm": 1.4375,
"learning_rate": 2.358859698155394e-05,
"loss": 0.2081,
"step": 1990
},
{
"epoch": 0.5437737901033171,
"grad_norm": 1.6640625,
"learning_rate": 2.344885410844047e-05,
"loss": 0.182,
"step": 2000
},
{
"epoch": 0.5437737901033171,
"eval_loss": 0.1640625,
"eval_runtime": 99.8495,
"eval_samples_per_second": 5.008,
"eval_steps_per_second": 0.16,
"step": 2000
},
{
"epoch": 0.5464926590538336,
"grad_norm": 1.203125,
"learning_rate": 2.3309111235327e-05,
"loss": 0.1363,
"step": 2010
},
{
"epoch": 0.5492115280043501,
"grad_norm": 1.8984375,
"learning_rate": 2.316936836221353e-05,
"loss": 0.1666,
"step": 2020
},
{
"epoch": 0.5519303969548668,
"grad_norm": 1.6796875,
"learning_rate": 2.302962548910006e-05,
"loss": 0.1405,
"step": 2030
},
{
"epoch": 0.5546492659053833,
"grad_norm": 1.3046875,
"learning_rate": 2.2889882615986584e-05,
"loss": 0.1768,
"step": 2040
},
{
"epoch": 0.5573681348559,
"grad_norm": 3.140625,
"learning_rate": 2.2750139742873114e-05,
"loss": 0.2092,
"step": 2050
},
{
"epoch": 0.5573681348559,
"eval_loss": 0.1575937569141388,
"eval_runtime": 99.6448,
"eval_samples_per_second": 5.018,
"eval_steps_per_second": 0.161,
"step": 2050
},
{
"epoch": 0.5600870038064165,
"grad_norm": 2.09375,
"learning_rate": 2.2610396869759643e-05,
"loss": 0.1888,
"step": 2060
},
{
"epoch": 0.5628058727569332,
"grad_norm": 1.921875,
"learning_rate": 2.2470653996646172e-05,
"loss": 0.1454,
"step": 2070
},
{
"epoch": 0.5655247417074497,
"grad_norm": 1.0703125,
"learning_rate": 2.2330911123532698e-05,
"loss": 0.1697,
"step": 2080
},
{
"epoch": 0.5682436106579662,
"grad_norm": 3.171875,
"learning_rate": 2.219116825041923e-05,
"loss": 0.19,
"step": 2090
},
{
"epoch": 0.5709624796084829,
"grad_norm": 1.546875,
"learning_rate": 2.205142537730576e-05,
"loss": 0.1925,
"step": 2100
},
{
"epoch": 0.5709624796084829,
"eval_loss": 0.16167187690734863,
"eval_runtime": 99.0392,
"eval_samples_per_second": 5.049,
"eval_steps_per_second": 0.162,
"step": 2100
},
{
"epoch": 0.5736813485589994,
"grad_norm": 1.8828125,
"learning_rate": 2.191168250419229e-05,
"loss": 0.1822,
"step": 2110
},
{
"epoch": 0.5764002175095161,
"grad_norm": 1.4609375,
"learning_rate": 2.1771939631078815e-05,
"loss": 0.1685,
"step": 2120
},
{
"epoch": 0.5791190864600326,
"grad_norm": 1.0546875,
"learning_rate": 2.1632196757965344e-05,
"loss": 0.2201,
"step": 2130
},
{
"epoch": 0.5818379554105492,
"grad_norm": 1.0625,
"learning_rate": 2.1492453884851873e-05,
"loss": 0.1572,
"step": 2140
},
{
"epoch": 0.5845568243610658,
"grad_norm": 0.9140625,
"learning_rate": 2.1352711011738403e-05,
"loss": 0.1507,
"step": 2150
},
{
"epoch": 0.5845568243610658,
"eval_loss": 0.15681250393390656,
"eval_runtime": 96.4445,
"eval_samples_per_second": 5.184,
"eval_steps_per_second": 0.166,
"step": 2150
},
{
"epoch": 0.5872756933115824,
"grad_norm": 2.9375,
"learning_rate": 2.1212968138624932e-05,
"loss": 0.1653,
"step": 2160
},
{
"epoch": 0.589994562262099,
"grad_norm": 1.5625,
"learning_rate": 2.1073225265511458e-05,
"loss": 0.1815,
"step": 2170
},
{
"epoch": 0.5927134312126155,
"grad_norm": 1.15625,
"learning_rate": 2.0933482392397987e-05,
"loss": 0.1157,
"step": 2180
},
{
"epoch": 0.5954323001631321,
"grad_norm": 1.890625,
"learning_rate": 2.0793739519284516e-05,
"loss": 0.1535,
"step": 2190
},
{
"epoch": 0.5981511691136487,
"grad_norm": 1.21875,
"learning_rate": 2.0653996646171045e-05,
"loss": 0.1547,
"step": 2200
},
{
"epoch": 0.5981511691136487,
"eval_loss": 0.16277343034744263,
"eval_runtime": 96.1155,
"eval_samples_per_second": 5.202,
"eval_steps_per_second": 0.166,
"step": 2200
},
{
"epoch": 0.6008700380641653,
"grad_norm": 2.09375,
"learning_rate": 2.0514253773057575e-05,
"loss": 0.1582,
"step": 2210
},
{
"epoch": 0.6035889070146819,
"grad_norm": 1.3203125,
"learning_rate": 2.0374510899944104e-05,
"loss": 0.1592,
"step": 2220
},
{
"epoch": 0.6063077759651985,
"grad_norm": 2.6875,
"learning_rate": 2.0234768026830633e-05,
"loss": 0.1771,
"step": 2230
},
{
"epoch": 0.609026644915715,
"grad_norm": 1.9921875,
"learning_rate": 2.0095025153717162e-05,
"loss": 0.169,
"step": 2240
},
{
"epoch": 0.6117455138662317,
"grad_norm": 2.140625,
"learning_rate": 1.995528228060369e-05,
"loss": 0.1685,
"step": 2250
},
{
"epoch": 0.6117455138662317,
"eval_loss": 0.16175781190395355,
"eval_runtime": 99.1628,
"eval_samples_per_second": 5.042,
"eval_steps_per_second": 0.161,
"step": 2250
},
{
"epoch": 0.6144643828167482,
"grad_norm": 1.234375,
"learning_rate": 1.9815539407490218e-05,
"loss": 0.165,
"step": 2260
},
{
"epoch": 0.6171832517672649,
"grad_norm": 2.140625,
"learning_rate": 1.9675796534376747e-05,
"loss": 0.1928,
"step": 2270
},
{
"epoch": 0.6199021207177814,
"grad_norm": 1.6484375,
"learning_rate": 1.9536053661263276e-05,
"loss": 0.2038,
"step": 2280
},
{
"epoch": 0.622620989668298,
"grad_norm": 1.828125,
"learning_rate": 1.9396310788149805e-05,
"loss": 0.1871,
"step": 2290
},
{
"epoch": 0.6253398586188146,
"grad_norm": 1.078125,
"learning_rate": 1.9256567915036335e-05,
"loss": 0.1733,
"step": 2300
},
{
"epoch": 0.6253398586188146,
"eval_loss": 0.15731249749660492,
"eval_runtime": 95.4546,
"eval_samples_per_second": 5.238,
"eval_steps_per_second": 0.168,
"step": 2300
},
{
"epoch": 0.6280587275693311,
"grad_norm": 1.40625,
"learning_rate": 1.911682504192286e-05,
"loss": 0.1706,
"step": 2310
},
{
"epoch": 0.6307775965198478,
"grad_norm": 3.078125,
"learning_rate": 1.897708216880939e-05,
"loss": 0.1856,
"step": 2320
},
{
"epoch": 0.6334964654703643,
"grad_norm": 1.375,
"learning_rate": 1.8837339295695922e-05,
"loss": 0.1852,
"step": 2330
},
{
"epoch": 0.636215334420881,
"grad_norm": 2.3125,
"learning_rate": 1.869759642258245e-05,
"loss": 0.1748,
"step": 2340
},
{
"epoch": 0.6389342033713975,
"grad_norm": 1.2890625,
"learning_rate": 1.8557853549468977e-05,
"loss": 0.1576,
"step": 2350
},
{
"epoch": 0.6389342033713975,
"eval_loss": 0.15459375083446503,
"eval_runtime": 94.7107,
"eval_samples_per_second": 5.279,
"eval_steps_per_second": 0.169,
"step": 2350
},
{
"epoch": 0.641653072321914,
"grad_norm": 1.4921875,
"learning_rate": 1.8418110676355507e-05,
"loss": 0.1657,
"step": 2360
},
{
"epoch": 0.6443719412724307,
"grad_norm": 1.8359375,
"learning_rate": 1.8278367803242036e-05,
"loss": 0.1725,
"step": 2370
},
{
"epoch": 0.6470908102229472,
"grad_norm": 1.90625,
"learning_rate": 1.8138624930128565e-05,
"loss": 0.1667,
"step": 2380
},
{
"epoch": 0.6498096791734639,
"grad_norm": 2.203125,
"learning_rate": 1.7998882057015094e-05,
"loss": 0.1468,
"step": 2390
},
{
"epoch": 0.6525285481239804,
"grad_norm": 2.3125,
"learning_rate": 1.785913918390162e-05,
"loss": 0.1852,
"step": 2400
},
{
"epoch": 0.6525285481239804,
"eval_loss": 0.14464062452316284,
"eval_runtime": 109.621,
"eval_samples_per_second": 4.561,
"eval_steps_per_second": 0.146,
"step": 2400
},
{
"epoch": 0.655247417074497,
"grad_norm": 2.015625,
"learning_rate": 1.771939631078815e-05,
"loss": 0.1942,
"step": 2410
},
{
"epoch": 0.6579662860250136,
"grad_norm": 1.9453125,
"learning_rate": 1.757965343767468e-05,
"loss": 0.1513,
"step": 2420
},
{
"epoch": 0.6606851549755302,
"grad_norm": 1.1875,
"learning_rate": 1.7439910564561208e-05,
"loss": 0.1445,
"step": 2430
},
{
"epoch": 0.6634040239260468,
"grad_norm": 3.171875,
"learning_rate": 1.7300167691447737e-05,
"loss": 0.1874,
"step": 2440
},
{
"epoch": 0.6661228928765633,
"grad_norm": 2.5625,
"learning_rate": 1.7160424818334266e-05,
"loss": 0.1697,
"step": 2450
},
{
"epoch": 0.6661228928765633,
"eval_loss": 0.14278124272823334,
"eval_runtime": 99.7567,
"eval_samples_per_second": 5.012,
"eval_steps_per_second": 0.16,
"step": 2450
},
{
"epoch": 0.6688417618270799,
"grad_norm": 1.6171875,
"learning_rate": 1.7020681945220796e-05,
"loss": 0.1505,
"step": 2460
},
{
"epoch": 0.6715606307775965,
"grad_norm": 1.1875,
"learning_rate": 1.6880939072107325e-05,
"loss": 0.2015,
"step": 2470
},
{
"epoch": 0.6742794997281131,
"grad_norm": 1.5625,
"learning_rate": 1.6741196198993854e-05,
"loss": 0.1782,
"step": 2480
},
{
"epoch": 0.6769983686786297,
"grad_norm": 3.140625,
"learning_rate": 1.660145332588038e-05,
"loss": 0.159,
"step": 2490
},
{
"epoch": 0.6797172376291463,
"grad_norm": 1.265625,
"learning_rate": 1.646171045276691e-05,
"loss": 0.2079,
"step": 2500
},
{
"epoch": 0.6797172376291463,
"eval_loss": 0.15110155940055847,
"eval_runtime": 101.2462,
"eval_samples_per_second": 4.938,
"eval_steps_per_second": 0.158,
"step": 2500
},
{
"epoch": 0.6824361065796629,
"grad_norm": 2.046875,
"learning_rate": 1.632196757965344e-05,
"loss": 0.1347,
"step": 2510
},
{
"epoch": 0.6851549755301795,
"grad_norm": 1.140625,
"learning_rate": 1.6182224706539968e-05,
"loss": 0.1575,
"step": 2520
},
{
"epoch": 0.687873844480696,
"grad_norm": 1.8046875,
"learning_rate": 1.6042481833426497e-05,
"loss": 0.1613,
"step": 2530
},
{
"epoch": 0.6905927134312126,
"grad_norm": 2.015625,
"learning_rate": 1.5902738960313023e-05,
"loss": 0.1625,
"step": 2540
},
{
"epoch": 0.6933115823817292,
"grad_norm": 3.421875,
"learning_rate": 1.5762996087199552e-05,
"loss": 0.1748,
"step": 2550
},
{
"epoch": 0.6933115823817292,
"eval_loss": 0.15357030928134918,
"eval_runtime": 102.5541,
"eval_samples_per_second": 4.875,
"eval_steps_per_second": 0.156,
"step": 2550
},
{
"epoch": 0.6960304513322458,
"grad_norm": 2.3125,
"learning_rate": 1.562325321408608e-05,
"loss": 0.1767,
"step": 2560
},
{
"epoch": 0.6987493202827624,
"grad_norm": 1.734375,
"learning_rate": 1.5483510340972614e-05,
"loss": 0.1507,
"step": 2570
},
{
"epoch": 0.7014681892332789,
"grad_norm": 2.0625,
"learning_rate": 1.534376746785914e-05,
"loss": 0.13,
"step": 2580
},
{
"epoch": 0.7041870581837956,
"grad_norm": 1.3671875,
"learning_rate": 1.5204024594745669e-05,
"loss": 0.1576,
"step": 2590
},
{
"epoch": 0.7069059271343121,
"grad_norm": 1.0546875,
"learning_rate": 1.5064281721632198e-05,
"loss": 0.173,
"step": 2600
},
{
"epoch": 0.7069059271343121,
"eval_loss": 0.15370312333106995,
"eval_runtime": 102.0222,
"eval_samples_per_second": 4.901,
"eval_steps_per_second": 0.157,
"step": 2600
},
{
"epoch": 0.7096247960848288,
"grad_norm": 0.62890625,
"learning_rate": 1.4924538848518726e-05,
"loss": 0.1421,
"step": 2610
},
{
"epoch": 0.7123436650353453,
"grad_norm": 1.8125,
"learning_rate": 1.4784795975405255e-05,
"loss": 0.1548,
"step": 2620
},
{
"epoch": 0.7150625339858618,
"grad_norm": 0.96484375,
"learning_rate": 1.4645053102291784e-05,
"loss": 0.1422,
"step": 2630
},
{
"epoch": 0.7177814029363785,
"grad_norm": 1.8125,
"learning_rate": 1.4505310229178312e-05,
"loss": 0.1577,
"step": 2640
},
{
"epoch": 0.720500271886895,
"grad_norm": 1.046875,
"learning_rate": 1.4365567356064841e-05,
"loss": 0.1542,
"step": 2650
},
{
"epoch": 0.720500271886895,
"eval_loss": 0.16090625524520874,
"eval_runtime": 105.0015,
"eval_samples_per_second": 4.762,
"eval_steps_per_second": 0.152,
"step": 2650
},
{
"epoch": 0.7232191408374117,
"grad_norm": 3.53125,
"learning_rate": 1.4225824482951369e-05,
"loss": 0.1655,
"step": 2660
},
{
"epoch": 0.7259380097879282,
"grad_norm": 0.96484375,
"learning_rate": 1.4086081609837898e-05,
"loss": 0.1508,
"step": 2670
},
{
"epoch": 0.7286568787384448,
"grad_norm": 1.9609375,
"learning_rate": 1.3946338736724427e-05,
"loss": 0.1455,
"step": 2680
},
{
"epoch": 0.7313757476889614,
"grad_norm": 2.171875,
"learning_rate": 1.3806595863610958e-05,
"loss": 0.1408,
"step": 2690
},
{
"epoch": 0.734094616639478,
"grad_norm": 1.125,
"learning_rate": 1.3666852990497486e-05,
"loss": 0.1562,
"step": 2700
},
{
"epoch": 0.734094616639478,
"eval_loss": 0.15767186880111694,
"eval_runtime": 98.7176,
"eval_samples_per_second": 5.065,
"eval_steps_per_second": 0.162,
"step": 2700
},
{
"epoch": 0.7368134855899946,
"grad_norm": 0.875,
"learning_rate": 1.3527110117384015e-05,
"loss": 0.1621,
"step": 2710
},
{
"epoch": 0.7395323545405111,
"grad_norm": 1.578125,
"learning_rate": 1.3387367244270544e-05,
"loss": 0.1673,
"step": 2720
},
{
"epoch": 0.7422512234910277,
"grad_norm": 1.1640625,
"learning_rate": 1.3247624371157072e-05,
"loss": 0.1708,
"step": 2730
},
{
"epoch": 0.7449700924415443,
"grad_norm": 1.4765625,
"learning_rate": 1.3107881498043601e-05,
"loss": 0.1295,
"step": 2740
},
{
"epoch": 0.7476889613920609,
"grad_norm": 1.9765625,
"learning_rate": 1.2968138624930128e-05,
"loss": 0.1518,
"step": 2750
},
{
"epoch": 0.7476889613920609,
"eval_loss": 0.1566796898841858,
"eval_runtime": 99.834,
"eval_samples_per_second": 5.008,
"eval_steps_per_second": 0.16,
"step": 2750
},
{
"epoch": 0.7504078303425775,
"grad_norm": 3.34375,
"learning_rate": 1.2828395751816658e-05,
"loss": 0.2034,
"step": 2760
},
{
"epoch": 0.7531266992930941,
"grad_norm": 1.9765625,
"learning_rate": 1.2688652878703185e-05,
"loss": 0.1786,
"step": 2770
},
{
"epoch": 0.7558455682436107,
"grad_norm": 1.5,
"learning_rate": 1.2548910005589715e-05,
"loss": 0.1941,
"step": 2780
},
{
"epoch": 0.7585644371941273,
"grad_norm": 2.109375,
"learning_rate": 1.2409167132476244e-05,
"loss": 0.1586,
"step": 2790
},
{
"epoch": 0.7612833061446438,
"grad_norm": 2.59375,
"learning_rate": 1.2269424259362773e-05,
"loss": 0.1914,
"step": 2800
},
{
"epoch": 0.7612833061446438,
"eval_loss": 0.14214062690734863,
"eval_runtime": 99.6815,
"eval_samples_per_second": 5.016,
"eval_steps_per_second": 0.161,
"step": 2800
},
{
"epoch": 0.7640021750951604,
"grad_norm": 2.6875,
"learning_rate": 1.2129681386249302e-05,
"loss": 0.1537,
"step": 2810
},
{
"epoch": 0.766721044045677,
"grad_norm": 1.5234375,
"learning_rate": 1.198993851313583e-05,
"loss": 0.1528,
"step": 2820
},
{
"epoch": 0.7694399129961936,
"grad_norm": 1.5234375,
"learning_rate": 1.1850195640022359e-05,
"loss": 0.1504,
"step": 2830
},
{
"epoch": 0.7721587819467102,
"grad_norm": 1.875,
"learning_rate": 1.1710452766908888e-05,
"loss": 0.1599,
"step": 2840
},
{
"epoch": 0.7748776508972267,
"grad_norm": 2.21875,
"learning_rate": 1.1570709893795418e-05,
"loss": 0.1833,
"step": 2850
},
{
"epoch": 0.7748776508972267,
"eval_loss": 0.15465624630451202,
"eval_runtime": 98.484,
"eval_samples_per_second": 5.077,
"eval_steps_per_second": 0.162,
"step": 2850
},
{
"epoch": 0.7775965198477434,
"grad_norm": 2.796875,
"learning_rate": 1.1430967020681945e-05,
"loss": 0.1537,
"step": 2860
},
{
"epoch": 0.7803153887982599,
"grad_norm": 2.796875,
"learning_rate": 1.1291224147568474e-05,
"loss": 0.1916,
"step": 2870
},
{
"epoch": 0.7830342577487766,
"grad_norm": 1.875,
"learning_rate": 1.1151481274455004e-05,
"loss": 0.1445,
"step": 2880
},
{
"epoch": 0.7857531266992931,
"grad_norm": 3.0625,
"learning_rate": 1.1011738401341531e-05,
"loss": 0.187,
"step": 2890
},
{
"epoch": 0.7884719956498096,
"grad_norm": 1.34375,
"learning_rate": 1.0871995528228062e-05,
"loss": 0.1492,
"step": 2900
},
{
"epoch": 0.7884719956498096,
"eval_loss": 0.14948438107967377,
"eval_runtime": 117.0767,
"eval_samples_per_second": 4.271,
"eval_steps_per_second": 0.137,
"step": 2900
},
{
"epoch": 0.7911908646003263,
"grad_norm": 1.15625,
"learning_rate": 1.073225265511459e-05,
"loss": 0.211,
"step": 2910
},
{
"epoch": 0.7939097335508428,
"grad_norm": 1.2109375,
"learning_rate": 1.0592509782001119e-05,
"loss": 0.1555,
"step": 2920
},
{
"epoch": 0.7966286025013595,
"grad_norm": 1.578125,
"learning_rate": 1.0452766908887646e-05,
"loss": 0.1666,
"step": 2930
},
{
"epoch": 0.799347471451876,
"grad_norm": 1.8046875,
"learning_rate": 1.0313024035774176e-05,
"loss": 0.1936,
"step": 2940
},
{
"epoch": 0.8020663404023926,
"grad_norm": 2.421875,
"learning_rate": 1.0173281162660705e-05,
"loss": 0.1484,
"step": 2950
},
{
"epoch": 0.8020663404023926,
"eval_loss": 0.1609531193971634,
"eval_runtime": 104.0405,
"eval_samples_per_second": 4.806,
"eval_steps_per_second": 0.154,
"step": 2950
},
{
"epoch": 0.8047852093529092,
"grad_norm": 1.34375,
"learning_rate": 1.0033538289547234e-05,
"loss": 0.1312,
"step": 2960
},
{
"epoch": 0.8075040783034257,
"grad_norm": 1.265625,
"learning_rate": 9.893795416433763e-06,
"loss": 0.1239,
"step": 2970
},
{
"epoch": 0.8102229472539424,
"grad_norm": 2.265625,
"learning_rate": 9.754052543320291e-06,
"loss": 0.1733,
"step": 2980
},
{
"epoch": 0.8129418162044589,
"grad_norm": 1.359375,
"learning_rate": 9.61430967020682e-06,
"loss": 0.1964,
"step": 2990
},
{
"epoch": 0.8156606851549756,
"grad_norm": 1.46875,
"learning_rate": 9.474566797093348e-06,
"loss": 0.1463,
"step": 3000
},
{
"epoch": 0.8156606851549756,
"eval_loss": 0.15464062988758087,
"eval_runtime": 103.2751,
"eval_samples_per_second": 4.841,
"eval_steps_per_second": 0.155,
"step": 3000
},
{
"epoch": 0.8183795541054921,
"grad_norm": 1.84375,
"learning_rate": 9.334823923979877e-06,
"loss": 0.1547,
"step": 3010
},
{
"epoch": 0.8210984230560087,
"grad_norm": 1.7265625,
"learning_rate": 9.195081050866406e-06,
"loss": 0.1783,
"step": 3020
},
{
"epoch": 0.8238172920065253,
"grad_norm": 1.609375,
"learning_rate": 9.055338177752935e-06,
"loss": 0.1595,
"step": 3030
},
{
"epoch": 0.8265361609570419,
"grad_norm": 1.296875,
"learning_rate": 8.915595304639463e-06,
"loss": 0.1625,
"step": 3040
},
{
"epoch": 0.8292550299075585,
"grad_norm": 1.671875,
"learning_rate": 8.775852431525992e-06,
"loss": 0.1821,
"step": 3050
},
{
"epoch": 0.8292550299075585,
"eval_loss": 0.15335936844348907,
"eval_runtime": 100.4729,
"eval_samples_per_second": 4.976,
"eval_steps_per_second": 0.159,
"step": 3050
},
{
"epoch": 0.831973898858075,
"grad_norm": 3.0625,
"learning_rate": 8.636109558412521e-06,
"loss": 0.1855,
"step": 3060
},
{
"epoch": 0.8346927678085916,
"grad_norm": 1.328125,
"learning_rate": 8.496366685299049e-06,
"loss": 0.1513,
"step": 3070
},
{
"epoch": 0.8374116367591082,
"grad_norm": 1.0859375,
"learning_rate": 8.35662381218558e-06,
"loss": 0.1566,
"step": 3080
},
{
"epoch": 0.8401305057096248,
"grad_norm": 1.90625,
"learning_rate": 8.216880939072108e-06,
"loss": 0.1611,
"step": 3090
},
{
"epoch": 0.8428493746601414,
"grad_norm": 1.9453125,
"learning_rate": 8.077138065958637e-06,
"loss": 0.1432,
"step": 3100
},
{
"epoch": 0.8428493746601414,
"eval_loss": 0.15146875381469727,
"eval_runtime": 100.1544,
"eval_samples_per_second": 4.992,
"eval_steps_per_second": 0.16,
"step": 3100
},
{
"epoch": 0.845568243610658,
"grad_norm": 1.8359375,
"learning_rate": 7.937395192845164e-06,
"loss": 0.1737,
"step": 3110
},
{
"epoch": 0.8482871125611745,
"grad_norm": 1.0703125,
"learning_rate": 7.797652319731694e-06,
"loss": 0.1423,
"step": 3120
},
{
"epoch": 0.8510059815116912,
"grad_norm": 2.046875,
"learning_rate": 7.657909446618223e-06,
"loss": 0.1262,
"step": 3130
},
{
"epoch": 0.8537248504622077,
"grad_norm": 0.8515625,
"learning_rate": 7.518166573504752e-06,
"loss": 0.1402,
"step": 3140
},
{
"epoch": 0.8564437194127243,
"grad_norm": 1.3125,
"learning_rate": 7.3784237003912805e-06,
"loss": 0.1803,
"step": 3150
},
{
"epoch": 0.8564437194127243,
"eval_loss": 0.13735155761241913,
"eval_runtime": 98.6763,
"eval_samples_per_second": 5.067,
"eval_steps_per_second": 0.162,
"step": 3150
},
{
"epoch": 0.8591625883632409,
"grad_norm": 1.2265625,
"learning_rate": 7.238680827277809e-06,
"loss": 0.1801,
"step": 3160
},
{
"epoch": 0.8618814573137574,
"grad_norm": 3.9375,
"learning_rate": 7.098937954164338e-06,
"loss": 0.1819,
"step": 3170
},
{
"epoch": 0.8646003262642741,
"grad_norm": 1.875,
"learning_rate": 6.9591950810508665e-06,
"loss": 0.1458,
"step": 3180
},
{
"epoch": 0.8673191952147906,
"grad_norm": 2.9375,
"learning_rate": 6.819452207937395e-06,
"loss": 0.168,
"step": 3190
},
{
"epoch": 0.8700380641653073,
"grad_norm": 1.9609375,
"learning_rate": 6.679709334823925e-06,
"loss": 0.1505,
"step": 3200
},
{
"epoch": 0.8700380641653073,
"eval_loss": 0.14777344465255737,
"eval_runtime": 99.0119,
"eval_samples_per_second": 5.05,
"eval_steps_per_second": 0.162,
"step": 3200
},
{
"epoch": 0.8727569331158238,
"grad_norm": 9.0,
"learning_rate": 6.539966461710453e-06,
"loss": 0.1582,
"step": 3210
},
{
"epoch": 0.8754758020663403,
"grad_norm": 1.890625,
"learning_rate": 6.400223588596982e-06,
"loss": 0.176,
"step": 3220
},
{
"epoch": 0.878194671016857,
"grad_norm": 1.25,
"learning_rate": 6.26048071548351e-06,
"loss": 0.1346,
"step": 3230
},
{
"epoch": 0.8809135399673735,
"grad_norm": 1.953125,
"learning_rate": 6.1207378423700394e-06,
"loss": 0.1424,
"step": 3240
},
{
"epoch": 0.8836324089178902,
"grad_norm": 1.8671875,
"learning_rate": 5.980994969256568e-06,
"loss": 0.1413,
"step": 3250
},
{
"epoch": 0.8836324089178902,
"eval_loss": 0.14879687130451202,
"eval_runtime": 95.0133,
"eval_samples_per_second": 5.262,
"eval_steps_per_second": 0.168,
"step": 3250
},
{
"epoch": 0.8863512778684067,
"grad_norm": 1.8359375,
"learning_rate": 5.841252096143097e-06,
"loss": 0.1432,
"step": 3260
},
{
"epoch": 0.8890701468189234,
"grad_norm": 2.203125,
"learning_rate": 5.701509223029626e-06,
"loss": 0.1352,
"step": 3270
},
{
"epoch": 0.8917890157694399,
"grad_norm": 1.578125,
"learning_rate": 5.561766349916155e-06,
"loss": 0.1572,
"step": 3280
},
{
"epoch": 0.8945078847199565,
"grad_norm": 2.078125,
"learning_rate": 5.422023476802683e-06,
"loss": 0.1746,
"step": 3290
},
{
"epoch": 0.8972267536704731,
"grad_norm": 1.5703125,
"learning_rate": 5.282280603689212e-06,
"loss": 0.1812,
"step": 3300
},
{
"epoch": 0.8972267536704731,
"eval_loss": 0.1419062465429306,
"eval_runtime": 95.5103,
"eval_samples_per_second": 5.235,
"eval_steps_per_second": 0.168,
"step": 3300
},
{
"epoch": 0.8999456226209896,
"grad_norm": 1.2265625,
"learning_rate": 5.142537730575741e-06,
"loss": 0.1398,
"step": 3310
},
{
"epoch": 0.9026644915715063,
"grad_norm": 2.28125,
"learning_rate": 5.002794857462269e-06,
"loss": 0.143,
"step": 3320
},
{
"epoch": 0.9053833605220228,
"grad_norm": 2.34375,
"learning_rate": 4.863051984348798e-06,
"loss": 0.1623,
"step": 3330
},
{
"epoch": 0.9081022294725394,
"grad_norm": 3.21875,
"learning_rate": 4.723309111235328e-06,
"loss": 0.1893,
"step": 3340
},
{
"epoch": 0.910821098423056,
"grad_norm": 1.5703125,
"learning_rate": 4.583566238121856e-06,
"loss": 0.1617,
"step": 3350
},
{
"epoch": 0.910821098423056,
"eval_loss": 0.15714062750339508,
"eval_runtime": 94.6634,
"eval_samples_per_second": 5.282,
"eval_steps_per_second": 0.169,
"step": 3350
},
{
"epoch": 0.9135399673735726,
"grad_norm": 1.7734375,
"learning_rate": 4.443823365008385e-06,
"loss": 0.1508,
"step": 3360
},
{
"epoch": 0.9162588363240892,
"grad_norm": 5.09375,
"learning_rate": 4.304080491894914e-06,
"loss": 0.1355,
"step": 3370
},
{
"epoch": 0.9189777052746058,
"grad_norm": 2.09375,
"learning_rate": 4.164337618781442e-06,
"loss": 0.1457,
"step": 3380
},
{
"epoch": 0.9216965742251223,
"grad_norm": 1.296875,
"learning_rate": 4.024594745667971e-06,
"loss": 0.1391,
"step": 3390
},
{
"epoch": 0.924415443175639,
"grad_norm": 2.046875,
"learning_rate": 3.8848518725545e-06,
"loss": 0.1853,
"step": 3400
},
{
"epoch": 0.924415443175639,
"eval_loss": 0.14689843356609344,
"eval_runtime": 109.943,
"eval_samples_per_second": 4.548,
"eval_steps_per_second": 0.146,
"step": 3400
},
{
"epoch": 0.9271343121261555,
"grad_norm": 2.265625,
"learning_rate": 3.7451089994410285e-06,
"loss": 0.1904,
"step": 3410
},
{
"epoch": 0.9298531810766721,
"grad_norm": 1.34375,
"learning_rate": 3.6053661263275578e-06,
"loss": 0.1342,
"step": 3420
},
{
"epoch": 0.9325720500271887,
"grad_norm": 2.15625,
"learning_rate": 3.465623253214086e-06,
"loss": 0.1583,
"step": 3430
},
{
"epoch": 0.9352909189777052,
"grad_norm": 1.859375,
"learning_rate": 3.325880380100615e-06,
"loss": 0.1763,
"step": 3440
},
{
"epoch": 0.9380097879282219,
"grad_norm": 1.703125,
"learning_rate": 3.1861375069871442e-06,
"loss": 0.1367,
"step": 3450
},
{
"epoch": 0.9380097879282219,
"eval_loss": 0.1392187476158142,
"eval_runtime": 96.262,
"eval_samples_per_second": 5.194,
"eval_steps_per_second": 0.166,
"step": 3450
},
{
"epoch": 0.9407286568787384,
"grad_norm": 1.15625,
"learning_rate": 3.0463946338736726e-06,
"loss": 0.184,
"step": 3460
},
{
"epoch": 0.9434475258292551,
"grad_norm": 1.2734375,
"learning_rate": 2.9066517607602015e-06,
"loss": 0.1794,
"step": 3470
},
{
"epoch": 0.9461663947797716,
"grad_norm": 2.9375,
"learning_rate": 2.7669088876467303e-06,
"loss": 0.1705,
"step": 3480
},
{
"epoch": 0.9488852637302883,
"grad_norm": 1.0078125,
"learning_rate": 2.627166014533259e-06,
"loss": 0.1846,
"step": 3490
},
{
"epoch": 0.9516041326808048,
"grad_norm": 0.97265625,
"learning_rate": 2.487423141419788e-06,
"loss": 0.1917,
"step": 3500
},
{
"epoch": 0.9516041326808048,
"eval_loss": 0.14869531989097595,
"eval_runtime": 96.4372,
"eval_samples_per_second": 5.185,
"eval_steps_per_second": 0.166,
"step": 3500
}
],
"logging_steps": 10,
"max_steps": 3678,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.05068095973888e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
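
A minimal sketch of how this trainer state can be inspected offline: it assumes Python with matplotlib installed and uses an illustrative local path to the checkpoint; only the keys that actually appear above (log_history, step, loss, eval_loss) are relied on.

```python
import json
import matplotlib.pyplot as plt

# Path is illustrative; point it at the downloaded checkpoint directory.
with open("checkpoint-3500/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and eval entries (with "eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Plot both curves against the global step to see the convergence recorded above.
plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()
```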