{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004,
"grad_norm": 192.0,
"learning_rate": 7.2e-07,
"loss": 17.8929,
"step": 10
},
{
"epoch": 0.008,
"grad_norm": 116.5,
"learning_rate": 1.52e-06,
"loss": 17.1894,
"step": 20
},
{
"epoch": 0.012,
"grad_norm": 93.5,
"learning_rate": 2.3200000000000002e-06,
"loss": 17.1662,
"step": 30
},
{
"epoch": 0.016,
"grad_norm": 59.0,
"learning_rate": 3.12e-06,
"loss": 15.5562,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 93.5,
"learning_rate": 3.920000000000001e-06,
"loss": 14.6218,
"step": 50
},
{
"epoch": 0.024,
"grad_norm": 122.5,
"learning_rate": 4.7200000000000005e-06,
"loss": 14.0262,
"step": 60
},
{
"epoch": 0.028,
"grad_norm": 135.0,
"learning_rate": 5.5200000000000005e-06,
"loss": 14.516,
"step": 70
},
{
"epoch": 0.032,
"grad_norm": 211.0,
"learning_rate": 6.3200000000000005e-06,
"loss": 13.598,
"step": 80
},
{
"epoch": 0.036,
"grad_norm": 172.0,
"learning_rate": 7.1200000000000004e-06,
"loss": 14.4849,
"step": 90
},
{
"epoch": 0.04,
"grad_norm": 183.0,
"learning_rate": 7.92e-06,
"loss": 13.3844,
"step": 100
},
{
"epoch": 0.044,
"grad_norm": 144.0,
"learning_rate": 8.720000000000001e-06,
"loss": 13.5488,
"step": 110
},
{
"epoch": 0.048,
"grad_norm": 153.0,
"learning_rate": 9.52e-06,
"loss": 14.8995,
"step": 120
},
{
"epoch": 0.052,
"grad_norm": 156.0,
"learning_rate": 1.0320000000000001e-05,
"loss": 14.0248,
"step": 130
},
{
"epoch": 0.056,
"grad_norm": 276.0,
"learning_rate": 1.1120000000000002e-05,
"loss": 14.2637,
"step": 140
},
{
"epoch": 0.06,
"grad_norm": 176.0,
"learning_rate": 1.1920000000000001e-05,
"loss": 15.7517,
"step": 150
},
{
"epoch": 0.064,
"grad_norm": 178.0,
"learning_rate": 1.2720000000000002e-05,
"loss": 15.2519,
"step": 160
},
{
"epoch": 0.068,
"grad_norm": 5408.0,
"learning_rate": 1.3520000000000003e-05,
"loss": 51.245,
"step": 170
},
{
"epoch": 0.072,
"grad_norm": 576.0,
"learning_rate": 1.432e-05,
"loss": 23.1949,
"step": 180
},
{
"epoch": 0.076,
"grad_norm": 402.0,
"learning_rate": 1.5120000000000001e-05,
"loss": 18.3584,
"step": 190
},
{
"epoch": 0.08,
"grad_norm": 178.0,
"learning_rate": 1.5920000000000003e-05,
"loss": 17.5383,
"step": 200
},
{
"epoch": 0.084,
"grad_norm": 2064.0,
"learning_rate": 1.672e-05,
"loss": 22.9987,
"step": 210
},
{
"epoch": 0.088,
"grad_norm": 190.0,
"learning_rate": 1.752e-05,
"loss": 19.4033,
"step": 220
},
{
"epoch": 0.092,
"grad_norm": 150.0,
"learning_rate": 1.832e-05,
"loss": 15.7816,
"step": 230
},
{
"epoch": 0.096,
"grad_norm": 182.0,
"learning_rate": 1.912e-05,
"loss": 20.0571,
"step": 240
},
{
"epoch": 0.1,
"grad_norm": 302.0,
"learning_rate": 1.9920000000000002e-05,
"loss": 19.3505,
"step": 250
},
{
"epoch": 0.104,
"grad_norm": 246.0,
"learning_rate": 1.9999210442038164e-05,
"loss": 19.3353,
"step": 260
},
{
"epoch": 0.108,
"grad_norm": 139.0,
"learning_rate": 1.9996481265944146e-05,
"loss": 17.5568,
"step": 270
},
{
"epoch": 0.112,
"grad_norm": 314.0,
"learning_rate": 1.9991803256020393e-05,
"loss": 24.114,
"step": 280
},
{
"epoch": 0.116,
"grad_norm": 168.0,
"learning_rate": 1.99851773242542e-05,
"loss": 24.3222,
"step": 290
},
{
"epoch": 0.12,
"grad_norm": 157.0,
"learning_rate": 1.99766047623841e-05,
"loss": 17.1357,
"step": 300
},
{
"epoch": 0.124,
"grad_norm": 243.0,
"learning_rate": 1.996608724164801e-05,
"loss": 17.5944,
"step": 310
},
{
"epoch": 0.128,
"grad_norm": 168.0,
"learning_rate": 1.995362681245744e-05,
"loss": 17.9973,
"step": 320
},
{
"epoch": 0.132,
"grad_norm": 129.0,
"learning_rate": 1.9939225903997748e-05,
"loss": 16.3556,
"step": 330
},
{
"epoch": 0.136,
"grad_norm": 109.0,
"learning_rate": 1.992288732375458e-05,
"loss": 15.275,
"step": 340
},
{
"epoch": 0.14,
"grad_norm": 148.0,
"learning_rate": 1.9904614256966514e-05,
"loss": 16.484,
"step": 350
},
{
"epoch": 0.144,
"grad_norm": 139.0,
"learning_rate": 1.9884410266004134e-05,
"loss": 17.0713,
"step": 360
},
{
"epoch": 0.148,
"grad_norm": 252.0,
"learning_rate": 1.986227928967551e-05,
"loss": 17.0777,
"step": 370
},
{
"epoch": 0.152,
"grad_norm": 160.0,
"learning_rate": 1.983822564245833e-05,
"loss": 16.9359,
"step": 380
},
{
"epoch": 0.156,
"grad_norm": 128.0,
"learning_rate": 1.981225401365877e-05,
"loss": 33.823,
"step": 390
},
{
"epoch": 0.16,
"grad_norm": 142.0,
"learning_rate": 1.9784369466497333e-05,
"loss": 17.8366,
"step": 400
},
{
"epoch": 0.164,
"grad_norm": 604.0,
"learning_rate": 1.9754577437121733e-05,
"loss": 17.0926,
"step": 410
},
{
"epoch": 0.168,
"grad_norm": 138.0,
"learning_rate": 1.9722883733547128e-05,
"loss": 25.0166,
"step": 420
},
{
"epoch": 0.172,
"grad_norm": 140.0,
"learning_rate": 1.968929453452383e-05,
"loss": 16.1607,
"step": 430
},
{
"epoch": 0.176,
"grad_norm": 100.0,
"learning_rate": 1.965381638833274e-05,
"loss": 15.8693,
"step": 440
},
{
"epoch": 0.18,
"grad_norm": 127.5,
"learning_rate": 1.9616456211508756e-05,
"loss": 16.6326,
"step": 450
},
{
"epoch": 0.184,
"grad_norm": 136.0,
"learning_rate": 1.9577221287492368e-05,
"loss": 16.4813,
"step": 460
},
{
"epoch": 0.188,
"grad_norm": 175.0,
"learning_rate": 1.9536119265209763e-05,
"loss": 16.9464,
"step": 470
},
{
"epoch": 0.192,
"grad_norm": 128.0,
"learning_rate": 1.9493158157581617e-05,
"loss": 16.8985,
"step": 480
},
{
"epoch": 0.196,
"grad_norm": 141.0,
"learning_rate": 1.9448346339960984e-05,
"loss": 16.1715,
"step": 490
},
{
"epoch": 0.2,
"grad_norm": 112.0,
"learning_rate": 1.9401692548500504e-05,
"loss": 15.8461,
"step": 500
},
{
"epoch": 0.204,
"grad_norm": 145.0,
"learning_rate": 1.935320587844926e-05,
"loss": 16.108,
"step": 510
},
{
"epoch": 0.208,
"grad_norm": 136.0,
"learning_rate": 1.9302895782379648e-05,
"loss": 15.7665,
"step": 520
},
{
"epoch": 0.212,
"grad_norm": 112.0,
"learning_rate": 1.925077206834458e-05,
"loss": 14.6068,
"step": 530
},
{
"epoch": 0.216,
"grad_norm": 93.0,
"learning_rate": 1.9196844897965393e-05,
"loss": 15.5989,
"step": 540
},
{
"epoch": 0.22,
"grad_norm": 121.0,
"learning_rate": 1.914112478445079e-05,
"loss": 15.9623,
"step": 550
},
{
"epoch": 0.224,
"grad_norm": 95.0,
"learning_rate": 1.9083622590547313e-05,
"loss": 15.6861,
"step": 560
},
{
"epoch": 0.228,
"grad_norm": 114.5,
"learning_rate": 1.9024349526421596e-05,
"loss": 15.4145,
"step": 570
},
{
"epoch": 0.232,
"grad_norm": 114.5,
"learning_rate": 1.896331714747493e-05,
"loss": 14.4382,
"step": 580
},
{
"epoch": 0.236,
"grad_norm": 111.0,
"learning_rate": 1.8900537352090523e-05,
"loss": 14.8372,
"step": 590
},
{
"epoch": 0.24,
"grad_norm": 129.0,
"learning_rate": 1.8836022379313884e-05,
"loss": 16.0596,
"step": 600
},
{
"epoch": 0.244,
"grad_norm": 97.0,
"learning_rate": 1.8769784806466768e-05,
"loss": 14.8813,
"step": 610
},
{
"epoch": 0.248,
"grad_norm": 97.0,
"learning_rate": 1.870183754669526e-05,
"loss": 13.9936,
"step": 620
},
{
"epoch": 0.252,
"grad_norm": 111.5,
"learning_rate": 1.863219384645227e-05,
"loss": 14.9816,
"step": 630
},
{
"epoch": 0.256,
"grad_norm": 108.5,
"learning_rate": 1.8560867282915164e-05,
"loss": 14.2031,
"step": 640
},
{
"epoch": 0.26,
"grad_norm": 104.0,
"learning_rate": 1.848787176133882e-05,
"loss": 14.7739,
"step": 650
},
{
"epoch": 0.264,
"grad_norm": 89.0,
"learning_rate": 1.8413221512344805e-05,
"loss": 14.7629,
"step": 660
},
{
"epoch": 0.268,
"grad_norm": 113.5,
"learning_rate": 1.8336931089147076e-05,
"loss": 14.6109,
"step": 670
},
{
"epoch": 0.272,
"grad_norm": 97.5,
"learning_rate": 1.8259015364714786e-05,
"loss": 15.1143,
"step": 680
},
{
"epoch": 0.276,
"grad_norm": 108.0,
"learning_rate": 1.8179489528872808e-05,
"loss": 14.3949,
"step": 690
},
{
"epoch": 0.28,
"grad_norm": 111.5,
"learning_rate": 1.80983690853404e-05,
"loss": 14.4273,
"step": 700
},
{
"epoch": 0.284,
"grad_norm": 112.5,
"learning_rate": 1.8015669848708768e-05,
"loss": 13.8492,
"step": 710
},
{
"epoch": 0.288,
"grad_norm": 119.5,
"learning_rate": 1.793140794135795e-05,
"loss": 14.5132,
"step": 720
},
{
"epoch": 0.292,
"grad_norm": 100.0,
"learning_rate": 1.7845599790313735e-05,
"loss": 13.9506,
"step": 730
},
{
"epoch": 0.296,
"grad_norm": 133.0,
"learning_rate": 1.7758262124045195e-05,
"loss": 13.6922,
"step": 740
},
{
"epoch": 0.3,
"grad_norm": 100.5,
"learning_rate": 1.7669411969203417e-05,
"loss": 14.3881,
"step": 750
},
{
"epoch": 0.304,
"grad_norm": 128.0,
"learning_rate": 1.7579066647302134e-05,
"loss": 14.2717,
"step": 760
},
{
"epoch": 0.308,
"grad_norm": 98.0,
"learning_rate": 1.7487243771340862e-05,
"loss": 13.3169,
"step": 770
},
{
"epoch": 0.312,
"grad_norm": 95.0,
"learning_rate": 1.7393961242371203e-05,
"loss": 13.9998,
"step": 780
},
{
"epoch": 0.316,
"grad_norm": 118.5,
"learning_rate": 1.7299237246007018e-05,
"loss": 13.8477,
"step": 790
},
{
"epoch": 0.32,
"grad_norm": 83.0,
"learning_rate": 1.720309024887907e-05,
"loss": 13.4694,
"step": 800
},
{
"epoch": 0.324,
"grad_norm": 119.0,
"learning_rate": 1.710553899503496e-05,
"loss": 14.0974,
"step": 810
},
{
"epoch": 0.328,
"grad_norm": 93.0,
"learning_rate": 1.700660250228492e-05,
"loss": 13.7642,
"step": 820
},
{
"epoch": 0.332,
"grad_norm": 113.0,
"learning_rate": 1.690630005849423e-05,
"loss": 13.5297,
"step": 830
},
{
"epoch": 0.336,
"grad_norm": 98.0,
"learning_rate": 1.6804651217823055e-05,
"loss": 12.6107,
"step": 840
},
{
"epoch": 0.34,
"grad_norm": 90.5,
"learning_rate": 1.6701675796914284e-05,
"loss": 13.7018,
"step": 850
},
{
"epoch": 0.344,
"grad_norm": 84.5,
"learning_rate": 1.6597393871030264e-05,
"loss": 13.5795,
"step": 860
},
{
"epoch": 0.348,
"grad_norm": 88.0,
"learning_rate": 1.649182577013906e-05,
"loss": 13.3089,
"step": 870
},
{
"epoch": 0.352,
"grad_norm": 108.5,
"learning_rate": 1.6384992074951124e-05,
"loss": 14.8595,
"step": 880
},
{
"epoch": 0.356,
"grad_norm": 109.5,
"learning_rate": 1.6276913612907005e-05,
"loss": 13.6668,
"step": 890
},
{
"epoch": 0.36,
"grad_norm": 82.0,
"learning_rate": 1.6167611454117027e-05,
"loss": 14.3564,
"step": 900
},
{
"epoch": 0.364,
"grad_norm": 88.5,
"learning_rate": 1.6057106907253617e-05,
"loss": 13.4427,
"step": 910
},
{
"epoch": 0.368,
"grad_norm": 106.5,
"learning_rate": 1.5945421515397135e-05,
"loss": 12.7742,
"step": 920
},
{
"epoch": 0.372,
"grad_norm": 98.0,
"learning_rate": 1.5832577051836016e-05,
"loss": 12.5311,
"step": 930
},
{
"epoch": 0.376,
"grad_norm": 105.5,
"learning_rate": 1.5718595515822027e-05,
"loss": 13.2552,
"step": 940
},
{
"epoch": 0.38,
"grad_norm": 104.0,
"learning_rate": 1.5603499128281447e-05,
"loss": 12.7764,
"step": 950
},
{
"epoch": 0.384,
"grad_norm": 88.0,
"learning_rate": 1.5487310327483087e-05,
"loss": 12.5753,
"step": 960
},
{
"epoch": 0.388,
"grad_norm": 88.0,
"learning_rate": 1.5370051764663872e-05,
"loss": 11.839,
"step": 970
},
{
"epoch": 0.392,
"grad_norm": 79.0,
"learning_rate": 1.5251746299612959e-05,
"loss": 12.3413,
"step": 980
},
{
"epoch": 0.396,
"grad_norm": 88.5,
"learning_rate": 1.5132416996215171e-05,
"loss": 11.7489,
"step": 990
},
{
"epoch": 0.4,
"grad_norm": 79.0,
"learning_rate": 1.5012087117954643e-05,
"loss": 12.265,
"step": 1000
},
{
"epoch": 0.404,
"grad_norm": 84.5,
"learning_rate": 1.4890780123379565e-05,
"loss": 12.1652,
"step": 1010
},
{
"epoch": 0.408,
"grad_norm": 83.0,
"learning_rate": 1.4768519661528879e-05,
"loss": 11.4174,
"step": 1020
},
{
"epoch": 0.412,
"grad_norm": 94.5,
"learning_rate": 1.464532956732188e-05,
"loss": 12.5315,
"step": 1030
},
{
"epoch": 0.416,
"grad_norm": 87.0,
"learning_rate": 1.4521233856911507e-05,
"loss": 12.0051,
"step": 1040
},
{
"epoch": 0.42,
"grad_norm": 85.5,
"learning_rate": 1.43962567230024e-05,
"loss": 12.177,
"step": 1050
},
{
"epoch": 0.424,
"grad_norm": 91.0,
"learning_rate": 1.4270422530134433e-05,
"loss": 12.3925,
"step": 1060
},
{
"epoch": 0.428,
"grad_norm": 96.5,
"learning_rate": 1.4143755809932843e-05,
"loss": 12.4198,
"step": 1070
},
{
"epoch": 0.432,
"grad_norm": 82.0,
"learning_rate": 1.4016281256325702e-05,
"loss": 12.5425,
"step": 1080
},
{
"epoch": 0.436,
"grad_norm": 73.5,
"learning_rate": 1.388802372072981e-05,
"loss": 12.0934,
"step": 1090
},
{
"epoch": 0.44,
"grad_norm": 73.0,
"learning_rate": 1.3759008207205869e-05,
"loss": 11.4355,
"step": 1100
},
{
"epoch": 0.444,
"grad_norm": 78.5,
"learning_rate": 1.3629259867583864e-05,
"loss": 11.8871,
"step": 1110
},
{
"epoch": 0.448,
"grad_norm": 76.5,
"learning_rate": 1.349880399655969e-05,
"loss": 11.2964,
"step": 1120
},
{
"epoch": 0.452,
"grad_norm": 86.0,
"learning_rate": 1.3367666026763884e-05,
"loss": 11.2128,
"step": 1130
},
{
"epoch": 0.456,
"grad_norm": 78.5,
"learning_rate": 1.3235871523803496e-05,
"loss": 11.5252,
"step": 1140
},
{
"epoch": 0.46,
"grad_norm": 76.5,
"learning_rate": 1.3103446181278015e-05,
"loss": 11.3193,
"step": 1150
},
{
"epoch": 0.464,
"grad_norm": 78.0,
"learning_rate": 1.297041581577035e-05,
"loss": 11.2879,
"step": 1160
},
{
"epoch": 0.468,
"grad_norm": 81.5,
"learning_rate": 1.2836806361813846e-05,
"loss": 11.1236,
"step": 1170
},
{
"epoch": 0.472,
"grad_norm": 72.0,
"learning_rate": 1.270264386683628e-05,
"loss": 10.7719,
"step": 1180
},
{
"epoch": 0.476,
"grad_norm": 73.0,
"learning_rate": 1.256795448608188e-05,
"loss": 10.945,
"step": 1190
},
{
"epoch": 0.48,
"grad_norm": 72.5,
"learning_rate": 1.2432764477512294e-05,
"loss": 11.3054,
"step": 1200
},
{
"epoch": 0.484,
"grad_norm": 69.0,
"learning_rate": 1.2297100196687557e-05,
"loss": 11.0717,
"step": 1210
},
{
"epoch": 0.488,
"grad_norm": 79.0,
"learning_rate": 1.2160988091628023e-05,
"loss": 11.1283,
"step": 1220
},
{
"epoch": 0.492,
"grad_norm": 69.5,
"learning_rate": 1.202445469765826e-05,
"loss": 10.7246,
"step": 1230
},
{
"epoch": 0.496,
"grad_norm": 64.5,
"learning_rate": 1.1887526632233954e-05,
"loss": 11.1319,
"step": 1240
},
{
"epoch": 0.5,
"grad_norm": 74.0,
"learning_rate": 1.1750230589752763e-05,
"loss": 11.0599,
"step": 1250
},
{
"epoch": 0.504,
"grad_norm": 90.5,
"learning_rate": 1.1612593336350209e-05,
"loss": 10.9624,
"step": 1260
},
{
"epoch": 0.508,
"grad_norm": 74.5,
"learning_rate": 1.1474641704681551e-05,
"loss": 11.6825,
"step": 1270
},
{
"epoch": 0.512,
"grad_norm": 83.5,
"learning_rate": 1.1336402588690727e-05,
"loss": 10.4348,
"step": 1280
},
{
"epoch": 0.516,
"grad_norm": 60.25,
"learning_rate": 1.1197902938367297e-05,
"loss": 10.192,
"step": 1290
},
{
"epoch": 0.52,
"grad_norm": 70.5,
"learning_rate": 1.105916975449252e-05,
"loss": 10.9921,
"step": 1300
},
{
"epoch": 0.524,
"grad_norm": 68.5,
"learning_rate": 1.0920230083375474e-05,
"loss": 10.9344,
"step": 1310
},
{
"epoch": 0.528,
"grad_norm": 94.5,
"learning_rate": 1.0781111011580336e-05,
"loss": 10.7657,
"step": 1320
},
{
"epoch": 0.532,
"grad_norm": 66.0,
"learning_rate": 1.0641839660645806e-05,
"loss": 10.2728,
"step": 1330
},
{
"epoch": 0.536,
"grad_norm": 86.0,
"learning_rate": 1.0502443181797696e-05,
"loss": 10.3206,
"step": 1340
},
{
"epoch": 0.54,
"grad_norm": 71.0,
"learning_rate": 1.036294875065576e-05,
"loss": 10.4514,
"step": 1350
},
{
"epoch": 0.544,
"grad_norm": 63.0,
"learning_rate": 1.0223383561935738e-05,
"loss": 10.148,
"step": 1360
},
{
"epoch": 0.548,
"grad_norm": 69.0,
"learning_rate": 1.0083774824147707e-05,
"loss": 10.213,
"step": 1370
},
{
"epoch": 0.552,
"grad_norm": 75.0,
"learning_rate": 9.944149754291719e-06,
"loss": 10.3198,
"step": 1380
},
{
"epoch": 0.556,
"grad_norm": 75.0,
"learning_rate": 9.80453557255179e-06,
"loss": 9.7986,
"step": 1390
},
{
"epoch": 0.56,
"grad_norm": 69.0,
"learning_rate": 9.664959496989286e-06,
"loss": 10.2986,
"step": 1400
},
{
"epoch": 0.564,
"grad_norm": 74.0,
"learning_rate": 9.525448738236691e-06,
"loss": 10.4321,
"step": 1410
},
{
"epoch": 0.568,
"grad_norm": 74.5,
"learning_rate": 9.386030494192847e-06,
"loss": 9.978,
"step": 1420
},
{
"epoch": 0.572,
"grad_norm": 70.0,
"learning_rate": 9.246731944720675e-06,
"loss": 9.8457,
"step": 1430
},
{
"epoch": 0.576,
"grad_norm": 69.0,
"learning_rate": 9.107580246348395e-06,
"loss": 9.8384,
"step": 1440
},
{
"epoch": 0.58,
"grad_norm": 71.5,
"learning_rate": 8.968602526975329e-06,
"loss": 9.4468,
"step": 1450
},
{
"epoch": 0.584,
"grad_norm": 152.0,
"learning_rate": 8.829825880583228e-06,
"loss": 9.6898,
"step": 1460
},
{
"epoch": 0.588,
"grad_norm": 59.0,
"learning_rate": 8.69127736195428e-06,
"loss": 9.3791,
"step": 1470
},
{
"epoch": 0.592,
"grad_norm": 70.0,
"learning_rate": 8.552983981396709e-06,
"loss": 9.6053,
"step": 1480
},
{
"epoch": 0.596,
"grad_norm": 63.0,
"learning_rate": 8.414972699479076e-06,
"loss": 9.5834,
"step": 1490
},
{
"epoch": 0.6,
"grad_norm": 77.5,
"learning_rate": 8.277270421774234e-06,
"loss": 9.4602,
"step": 1500
}
],
"logging_steps": 10,
"max_steps": 2500,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1034040492621824e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}