{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 10,
"global_step": 132,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022988505747126436,
"grad_norm": 0.11726659536361694,
"learning_rate": 0.0,
"loss": 0.949,
"step": 1
},
{
"epoch": 0.04597701149425287,
"grad_norm": 0.11742527782917023,
"learning_rate": 1.4285714285714285e-05,
"loss": 0.9424,
"step": 2
},
{
"epoch": 0.06896551724137931,
"grad_norm": 0.11452076584100723,
"learning_rate": 2.857142857142857e-05,
"loss": 0.9528,
"step": 3
},
{
"epoch": 0.09195402298850575,
"grad_norm": 0.11016185581684113,
"learning_rate": 4.2857142857142856e-05,
"loss": 0.9477,
"step": 4
},
{
"epoch": 0.11494252873563218,
"grad_norm": 0.09924337267875671,
"learning_rate": 5.714285714285714e-05,
"loss": 0.9246,
"step": 5
},
{
"epoch": 0.13793103448275862,
"grad_norm": 0.09508403390645981,
"learning_rate": 7.142857142857143e-05,
"loss": 0.923,
"step": 6
},
{
"epoch": 0.16091954022988506,
"grad_norm": 0.08589573949575424,
"learning_rate": 8.571428571428571e-05,
"loss": 0.9048,
"step": 7
},
{
"epoch": 0.1839080459770115,
"grad_norm": 0.07958836108446121,
"learning_rate": 0.0001,
"loss": 0.9165,
"step": 8
},
{
"epoch": 0.20689655172413793,
"grad_norm": 0.07911597937345505,
"learning_rate": 0.00011428571428571428,
"loss": 0.8881,
"step": 9
},
{
"epoch": 0.22988505747126436,
"grad_norm": 0.08259308338165283,
"learning_rate": 0.00012857142857142858,
"loss": 0.8866,
"step": 10
},
{
"epoch": 0.22988505747126436,
"eval_loss": 1.0011101961135864,
"eval_runtime": 305.7034,
"eval_samples_per_second": 32.378,
"eval_steps_per_second": 0.128,
"step": 10
},
{
"epoch": 0.25287356321839083,
"grad_norm": 0.08100606501102448,
"learning_rate": 0.00014285714285714287,
"loss": 0.8687,
"step": 11
},
{
"epoch": 0.27586206896551724,
"grad_norm": 0.08286823332309723,
"learning_rate": 0.00015714285714285716,
"loss": 0.8684,
"step": 12
},
{
"epoch": 0.2988505747126437,
"grad_norm": 0.08412789553403854,
"learning_rate": 0.00017142857142857143,
"loss": 0.8592,
"step": 13
},
{
"epoch": 0.3218390804597701,
"grad_norm": 0.09051340073347092,
"learning_rate": 0.00018571428571428572,
"loss": 0.8389,
"step": 14
},
{
"epoch": 0.3448275862068966,
"grad_norm": 0.09371305257081985,
"learning_rate": 0.0002,
"loss": 0.8125,
"step": 15
},
{
"epoch": 0.367816091954023,
"grad_norm": 0.08772534877061844,
"learning_rate": 0.00019996456111234527,
"loss": 0.8217,
"step": 16
},
{
"epoch": 0.39080459770114945,
"grad_norm": 0.07762442529201508,
"learning_rate": 0.0001998582695676762,
"loss": 0.8052,
"step": 17
},
{
"epoch": 0.41379310344827586,
"grad_norm": 0.0685521736741066,
"learning_rate": 0.000199681200703075,
"loss": 0.7908,
"step": 18
},
{
"epoch": 0.4367816091954023,
"grad_norm": 0.06715967506170273,
"learning_rate": 0.00019943348002101371,
"loss": 0.7986,
"step": 19
},
{
"epoch": 0.45977011494252873,
"grad_norm": 0.0685219094157219,
"learning_rate": 0.00019911528310040074,
"loss": 0.7869,
"step": 20
},
{
"epoch": 0.45977011494252873,
"eval_loss": 0.8174761533737183,
"eval_runtime": 302.6325,
"eval_samples_per_second": 32.706,
"eval_steps_per_second": 0.129,
"step": 20
},
{
"epoch": 0.4827586206896552,
"grad_norm": 0.06762731820344925,
"learning_rate": 0.00019872683547213446,
"loss": 0.7657,
"step": 21
},
{
"epoch": 0.5057471264367817,
"grad_norm": 0.06856454163789749,
"learning_rate": 0.00019826841245925212,
"loss": 0.7829,
"step": 22
},
{
"epoch": 0.5287356321839081,
"grad_norm": 0.06053686887025833,
"learning_rate": 0.00019774033898178667,
"loss": 0.7562,
"step": 23
},
{
"epoch": 0.5517241379310345,
"grad_norm": 0.056510183960199356,
"learning_rate": 0.00019714298932647098,
"loss": 0.7485,
"step": 24
},
{
"epoch": 0.5747126436781609,
"grad_norm": 0.05787154287099838,
"learning_rate": 0.0001964767868814516,
"loss": 0.73,
"step": 25
},
{
"epoch": 0.5977011494252874,
"grad_norm": 0.05212588980793953,
"learning_rate": 0.00019574220383620055,
"loss": 0.74,
"step": 26
},
{
"epoch": 0.6206896551724138,
"grad_norm": 0.047729115933179855,
"learning_rate": 0.00019493976084683813,
"loss": 0.7381,
"step": 27
},
{
"epoch": 0.6436781609195402,
"grad_norm": 0.044737160205841064,
"learning_rate": 0.00019407002666710336,
"loss": 0.7368,
"step": 28
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.04290475323796272,
"learning_rate": 0.00019313361774523385,
"loss": 0.7369,
"step": 29
},
{
"epoch": 0.6896551724137931,
"grad_norm": 0.03797232359647751,
"learning_rate": 0.00019213119778704128,
"loss": 0.724,
"step": 30
},
{
"epoch": 0.6896551724137931,
"eval_loss": 0.725147545337677,
"eval_runtime": 301.4442,
"eval_samples_per_second": 32.835,
"eval_steps_per_second": 0.129,
"step": 30
},
{
"epoch": 0.7126436781609196,
"grad_norm": 0.035527635365724564,
"learning_rate": 0.00019106347728549135,
"loss": 0.7304,
"step": 31
},
{
"epoch": 0.735632183908046,
"grad_norm": 0.03322393074631691,
"learning_rate": 0.00018993121301712193,
"loss": 0.7195,
"step": 32
},
{
"epoch": 0.7586206896551724,
"grad_norm": 0.03048304282128811,
"learning_rate": 0.00018873520750565718,
"loss": 0.7192,
"step": 33
},
{
"epoch": 0.7816091954022989,
"grad_norm": 0.03057900071144104,
"learning_rate": 0.00018747630845319612,
"loss": 0.7056,
"step": 34
},
{
"epoch": 0.8045977011494253,
"grad_norm": 0.03429551050066948,
"learning_rate": 0.0001861554081393806,
"loss": 0.7004,
"step": 35
},
{
"epoch": 0.8275862068965517,
"grad_norm": 0.03101194091141224,
"learning_rate": 0.0001847734427889671,
"loss": 0.72,
"step": 36
},
{
"epoch": 0.8505747126436781,
"grad_norm": 0.030171377584338188,
"learning_rate": 0.0001833313919082515,
"loss": 0.7169,
"step": 37
},
{
"epoch": 0.8735632183908046,
"grad_norm": 0.03114650957286358,
"learning_rate": 0.0001818302775908169,
"loss": 0.7078,
"step": 38
},
{
"epoch": 0.896551724137931,
"grad_norm": 0.025148095563054085,
"learning_rate": 0.00018027116379309638,
"loss": 0.699,
"step": 39
},
{
"epoch": 0.9195402298850575,
"grad_norm": 0.02687731944024563,
"learning_rate": 0.00017865515558026428,
"loss": 0.702,
"step": 40
},
{
"epoch": 0.9195402298850575,
"eval_loss": 0.702426552772522,
"eval_runtime": 304.0078,
"eval_samples_per_second": 32.558,
"eval_steps_per_second": 0.128,
"step": 40
},
{
"epoch": 0.9425287356321839,
"grad_norm": 0.021480431780219078,
"learning_rate": 0.00017698339834299061,
"loss": 0.7041,
"step": 41
},
{
"epoch": 0.9655172413793104,
"grad_norm": 0.0241087656468153,
"learning_rate": 0.00017525707698561385,
"loss": 0.7015,
"step": 42
},
{
"epoch": 0.9885057471264368,
"grad_norm": 0.021253138780593872,
"learning_rate": 0.00017347741508630672,
"loss": 0.705,
"step": 43
},
{
"epoch": 1.0,
"grad_norm": 0.035563599318265915,
"learning_rate": 0.00017164567402983152,
"loss": 0.6885,
"step": 44
},
{
"epoch": 1.0229885057471264,
"grad_norm": 0.03471782058477402,
"learning_rate": 0.0001697631521134985,
"loss": 0.6895,
"step": 45
},
{
"epoch": 1.0459770114942528,
"grad_norm": 0.021441299468278885,
"learning_rate": 0.00016783118362696163,
"loss": 0.6928,
"step": 46
},
{
"epoch": 1.0689655172413792,
"grad_norm": 0.027905387803912163,
"learning_rate": 0.00016585113790650388,
"loss": 0.6954,
"step": 47
},
{
"epoch": 1.0919540229885056,
"grad_norm": 0.022263407707214355,
"learning_rate": 0.00016382441836448202,
"loss": 0.6869,
"step": 48
},
{
"epoch": 1.1149425287356323,
"grad_norm": 0.02491128444671631,
"learning_rate": 0.0001617524614946192,
"loss": 0.6955,
"step": 49
},
{
"epoch": 1.1379310344827587,
"grad_norm": 0.025002798065543175,
"learning_rate": 0.00015963673585385016,
"loss": 0.6926,
"step": 50
},
{
"epoch": 1.1379310344827587,
"eval_loss": 0.690385103225708,
"eval_runtime": 304.7579,
"eval_samples_per_second": 32.478,
"eval_steps_per_second": 0.128,
"step": 50
},
{
"epoch": 1.160919540229885,
"grad_norm": 0.022065704688429832,
"learning_rate": 0.0001574787410214407,
"loss": 0.6894,
"step": 51
},
{
"epoch": 1.1839080459770115,
"grad_norm": 0.02315184473991394,
"learning_rate": 0.00015528000653611935,
"loss": 0.6898,
"step": 52
},
{
"epoch": 1.206896551724138,
"grad_norm": 0.02273298054933548,
"learning_rate": 0.00015304209081197425,
"loss": 0.683,
"step": 53
},
{
"epoch": 1.2298850574712643,
"grad_norm": 0.025234676897525787,
"learning_rate": 0.000150766580033884,
"loss": 0.6857,
"step": 54
},
{
"epoch": 1.2528735632183907,
"grad_norm": 0.02536899223923683,
"learning_rate": 0.00014845508703326504,
"loss": 0.706,
"step": 55
},
{
"epoch": 1.2758620689655173,
"grad_norm": 0.020374612882733345,
"learning_rate": 0.0001461092501449326,
"loss": 0.6911,
"step": 56
},
{
"epoch": 1.2988505747126438,
"grad_norm": 0.019521547481417656,
"learning_rate": 0.00014373073204588556,
"loss": 0.6865,
"step": 57
},
{
"epoch": 1.3218390804597702,
"grad_norm": 0.023520860821008682,
"learning_rate": 0.00014132121857683783,
"loss": 0.6751,
"step": 58
},
{
"epoch": 1.3448275862068966,
"grad_norm": 0.02198459580540657,
"learning_rate": 0.00013888241754733208,
"loss": 0.6833,
"step": 59
},
{
"epoch": 1.367816091954023,
"grad_norm": 0.023501180112361908,
"learning_rate": 0.00013641605752528224,
"loss": 0.6795,
"step": 60
},
{
"epoch": 1.367816091954023,
"eval_loss": 0.6821420192718506,
"eval_runtime": 304.5567,
"eval_samples_per_second": 32.5,
"eval_steps_per_second": 0.128,
"step": 60
},
{
"epoch": 1.3908045977011494,
"grad_norm": 0.021825293079018593,
"learning_rate": 0.00013392388661180303,
"loss": 0.6751,
"step": 61
},
{
"epoch": 1.4137931034482758,
"grad_norm": 0.022589631378650665,
"learning_rate": 0.0001314076712021949,
"loss": 0.6744,
"step": 62
},
{
"epoch": 1.4367816091954024,
"grad_norm": 0.022593483328819275,
"learning_rate": 0.0001288691947339621,
"loss": 0.6648,
"step": 63
},
{
"epoch": 1.4597701149425286,
"grad_norm": 0.019547760486602783,
"learning_rate": 0.00012631025642275212,
"loss": 0.6852,
"step": 64
},
{
"epoch": 1.4827586206896552,
"grad_norm": 0.0213349349796772,
"learning_rate": 0.0001237326699871115,
"loss": 0.6861,
"step": 65
},
{
"epoch": 1.5057471264367817,
"grad_norm": 0.02310025505721569,
"learning_rate": 0.00012113826236296244,
"loss": 0.6683,
"step": 66
},
{
"epoch": 1.528735632183908,
"grad_norm": 0.02816838026046753,
"learning_rate": 0.00011852887240871145,
"loss": 0.6937,
"step": 67
},
{
"epoch": 1.5517241379310345,
"grad_norm": 0.020846517756581306,
"learning_rate": 0.00011590634960190721,
"loss": 0.6514,
"step": 68
},
{
"epoch": 1.5747126436781609,
"grad_norm": 0.021266207098960876,
"learning_rate": 0.00011327255272837221,
"loss": 0.672,
"step": 69
},
{
"epoch": 1.5977011494252875,
"grad_norm": 0.021223735064268112,
"learning_rate": 0.00011062934856473655,
"loss": 0.6694,
"step": 70
},
{
"epoch": 1.5977011494252875,
"eval_loss": 0.6756598353385925,
"eval_runtime": 307.17,
"eval_samples_per_second": 32.223,
"eval_steps_per_second": 0.127,
"step": 70
},
{
"epoch": 1.6206896551724137,
"grad_norm": 0.02269381284713745,
"learning_rate": 0.00010797861055530831,
"loss": 0.6756,
"step": 71
},
{
"epoch": 1.6436781609195403,
"grad_norm": 0.020797230303287506,
"learning_rate": 0.00010532221748421787,
"loss": 0.6776,
"step": 72
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.02278304472565651,
"learning_rate": 0.00010266205214377748,
"loss": 0.6634,
"step": 73
},
{
"epoch": 1.6896551724137931,
"grad_norm": 0.028367692604660988,
"learning_rate": 0.0001,
"loss": 0.6644,
"step": 74
},
{
"epoch": 1.7126436781609196,
"grad_norm": 0.026720669120550156,
"learning_rate": 9.733794785622253e-05,
"loss": 0.692,
"step": 75
},
{
"epoch": 1.735632183908046,
"grad_norm": 0.02203705534338951,
"learning_rate": 9.467778251578217e-05,
"loss": 0.669,
"step": 76
},
{
"epoch": 1.7586206896551724,
"grad_norm": 0.025563135743141174,
"learning_rate": 9.202138944469168e-05,
"loss": 0.6778,
"step": 77
},
{
"epoch": 1.7816091954022988,
"grad_norm": 0.026585958898067474,
"learning_rate": 8.937065143526347e-05,
"loss": 0.6616,
"step": 78
},
{
"epoch": 1.8045977011494254,
"grad_norm": 0.023832708597183228,
"learning_rate": 8.672744727162781e-05,
"loss": 0.6785,
"step": 79
},
{
"epoch": 1.8275862068965516,
"grad_norm": 0.03144339472055435,
"learning_rate": 8.409365039809281e-05,
"loss": 0.674,
"step": 80
},
{
"epoch": 1.8275862068965516,
"eval_loss": 0.6707818508148193,
"eval_runtime": 303.5438,
"eval_samples_per_second": 32.608,
"eval_steps_per_second": 0.128,
"step": 80
},
{
"epoch": 1.8505747126436782,
"grad_norm": 0.02507634088397026,
"learning_rate": 8.147112759128859e-05,
"loss": 0.678,
"step": 81
},
{
"epoch": 1.8735632183908046,
"grad_norm": 0.024481408298015594,
"learning_rate": 7.886173763703757e-05,
"loss": 0.6598,
"step": 82
},
{
"epoch": 1.896551724137931,
"grad_norm": 0.022987501695752144,
"learning_rate": 7.626733001288851e-05,
"loss": 0.6679,
"step": 83
},
{
"epoch": 1.9195402298850575,
"grad_norm": 0.025647416710853577,
"learning_rate": 7.368974357724789e-05,
"loss": 0.6735,
"step": 84
},
{
"epoch": 1.9425287356321839,
"grad_norm": 0.024662526324391365,
"learning_rate": 7.113080526603792e-05,
"loss": 0.6423,
"step": 85
},
{
"epoch": 1.9655172413793105,
"grad_norm": 0.025277674198150635,
"learning_rate": 6.859232879780515e-05,
"loss": 0.6815,
"step": 86
},
{
"epoch": 1.9885057471264367,
"grad_norm": 0.022437684237957,
"learning_rate": 6.607611338819697e-05,
"loss": 0.6788,
"step": 87
},
{
"epoch": 2.0,
"grad_norm": 0.03651784360408783,
"learning_rate": 6.358394247471778e-05,
"loss": 0.7069,
"step": 88
},
{
"epoch": 2.0229885057471266,
"grad_norm": 0.023186692968010902,
"learning_rate": 6.111758245266794e-05,
"loss": 0.6604,
"step": 89
},
{
"epoch": 2.045977011494253,
"grad_norm": 0.02420475333929062,
"learning_rate": 5.867878142316221e-05,
"loss": 0.6543,
"step": 90
},
{
"epoch": 2.045977011494253,
"eval_loss": 0.6659972071647644,
"eval_runtime": 302.4198,
"eval_samples_per_second": 32.729,
"eval_steps_per_second": 0.129,
"step": 90
},
{
"epoch": 2.0689655172413794,
"grad_norm": 0.0236943569034338,
"learning_rate": 5.626926795411447e-05,
"loss": 0.647,
"step": 91
},
{
"epoch": 2.0919540229885056,
"grad_norm": 0.024277806282043457,
"learning_rate": 5.38907498550674e-05,
"loss": 0.6541,
"step": 92
},
{
"epoch": 2.1149425287356323,
"grad_norm": 0.023934554308652878,
"learning_rate": 5.1544912966734994e-05,
"loss": 0.672,
"step": 93
},
{
"epoch": 2.1379310344827585,
"grad_norm": 0.022945648059248924,
"learning_rate": 4.9233419966116036e-05,
"loss": 0.6401,
"step": 94
},
{
"epoch": 2.160919540229885,
"grad_norm": 0.026299618184566498,
"learning_rate": 4.695790918802576e-05,
"loss": 0.6547,
"step": 95
},
{
"epoch": 2.1839080459770113,
"grad_norm": 0.022878944873809814,
"learning_rate": 4.47199934638807e-05,
"loss": 0.6664,
"step": 96
},
{
"epoch": 2.206896551724138,
"grad_norm": 0.023859554901719093,
"learning_rate": 4.252125897855932e-05,
"loss": 0.6571,
"step": 97
},
{
"epoch": 2.2298850574712645,
"grad_norm": 0.02457576058804989,
"learning_rate": 4.036326414614985e-05,
"loss": 0.6682,
"step": 98
},
{
"epoch": 2.2528735632183907,
"grad_norm": 0.023506339639425278,
"learning_rate": 3.824753850538082e-05,
"loss": 0.6555,
"step": 99
},
{
"epoch": 2.2758620689655173,
"grad_norm": 0.038815535604953766,
"learning_rate": 3.617558163551802e-05,
"loss": 0.6492,
"step": 100
},
{
"epoch": 2.2758620689655173,
"eval_loss": 0.6638627052307129,
"eval_runtime": 305.4045,
"eval_samples_per_second": 32.409,
"eval_steps_per_second": 0.128,
"step": 100
},
{
"epoch": 2.2988505747126435,
"grad_norm": 0.02231566794216633,
"learning_rate": 3.414886209349615e-05,
"loss": 0.6485,
"step": 101
},
{
"epoch": 2.32183908045977,
"grad_norm": 0.023987093940377235,
"learning_rate": 3.216881637303839e-05,
"loss": 0.6549,
"step": 102
},
{
"epoch": 2.344827586206897,
"grad_norm": 0.026101969182491302,
"learning_rate": 3.0236847886501542e-05,
"loss": 0.6593,
"step": 103
},
{
"epoch": 2.367816091954023,
"grad_norm": 0.023063533008098602,
"learning_rate": 2.8354325970168484e-05,
"loss": 0.6535,
"step": 104
},
{
"epoch": 2.3908045977011496,
"grad_norm": 0.023235062137246132,
"learning_rate": 2.6522584913693294e-05,
"loss": 0.652,
"step": 105
},
{
"epoch": 2.413793103448276,
"grad_norm": 0.02247541956603527,
"learning_rate": 2.4742923014386156e-05,
"loss": 0.6359,
"step": 106
},
{
"epoch": 2.4367816091954024,
"grad_norm": 0.02353539690375328,
"learning_rate": 2.301660165700936e-05,
"loss": 0.6606,
"step": 107
},
{
"epoch": 2.4597701149425286,
"grad_norm": 0.02745731920003891,
"learning_rate": 2.1344844419735755e-05,
"loss": 0.6435,
"step": 108
},
{
"epoch": 2.4827586206896552,
"grad_norm": 0.024043822661042213,
"learning_rate": 1.9728836206903656e-05,
"loss": 0.6548,
"step": 109
},
{
"epoch": 2.5057471264367814,
"grad_norm": 0.022284943610429764,
"learning_rate": 1.8169722409183097e-05,
"loss": 0.6508,
"step": 110
},
{
"epoch": 2.5057471264367814,
"eval_loss": 0.662615954875946,
"eval_runtime": 299.7437,
"eval_samples_per_second": 33.022,
"eval_steps_per_second": 0.13,
"step": 110
},
{
"epoch": 2.528735632183908,
"grad_norm": 0.02287135273218155,
"learning_rate": 1.6668608091748495e-05,
"loss": 0.6384,
"step": 111
},
{
"epoch": 2.5517241379310347,
"grad_norm": 0.02666754461824894,
"learning_rate": 1.522655721103291e-05,
"loss": 0.6628,
"step": 112
},
{
"epoch": 2.574712643678161,
"grad_norm": 0.023315856233239174,
"learning_rate": 1.3844591860619383e-05,
"loss": 0.6634,
"step": 113
},
{
"epoch": 2.5977011494252875,
"grad_norm": 0.023025084286928177,
"learning_rate": 1.2523691546803873e-05,
"loss": 0.6711,
"step": 114
},
{
"epoch": 2.6206896551724137,
"grad_norm": 0.023308036848902702,
"learning_rate": 1.1264792494342857e-05,
"loss": 0.6421,
"step": 115
},
{
"epoch": 2.6436781609195403,
"grad_norm": 0.022443862631917,
"learning_rate": 1.0068786982878087e-05,
"loss": 0.6557,
"step": 116
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.0229327455163002,
"learning_rate": 8.936522714508678e-06,
"loss": 0.6709,
"step": 117
},
{
"epoch": 2.689655172413793,
"grad_norm": 0.0223649051040411,
"learning_rate": 7.868802212958703e-06,
"loss": 0.6493,
"step": 118
},
{
"epoch": 2.7126436781609193,
"grad_norm": 0.022036027163267136,
"learning_rate": 6.866382254766157e-06,
"loss": 0.6552,
"step": 119
},
{
"epoch": 2.735632183908046,
"grad_norm": 0.022042695432901382,
"learning_rate": 5.929973332896677e-06,
"loss": 0.656,
"step": 120
},
{
"epoch": 2.735632183908046,
"eval_loss": 0.6621032953262329,
"eval_runtime": 304.5155,
"eval_samples_per_second": 32.504,
"eval_steps_per_second": 0.128,
"step": 120
},
{
"epoch": 2.7586206896551726,
"grad_norm": 0.026914741843938828,
"learning_rate": 5.060239153161872e-06,
"loss": 0.6578,
"step": 121
},
{
"epoch": 2.781609195402299,
"grad_norm": 0.022699004039168358,
"learning_rate": 4.257796163799455e-06,
"loss": 0.6477,
"step": 122
},
{
"epoch": 2.8045977011494254,
"grad_norm": 0.02309352532029152,
"learning_rate": 3.5232131185484076e-06,
"loss": 0.6447,
"step": 123
},
{
"epoch": 2.8275862068965516,
"grad_norm": 0.02277517318725586,
"learning_rate": 2.857010673529015e-06,
"loss": 0.6427,
"step": 124
},
{
"epoch": 2.8505747126436782,
"grad_norm": 0.027453523129224777,
"learning_rate": 2.259661018213333e-06,
"loss": 0.6593,
"step": 125
},
{
"epoch": 2.873563218390805,
"grad_norm": 0.022547965869307518,
"learning_rate": 1.7315875407479032e-06,
"loss": 0.6637,
"step": 126
},
{
"epoch": 2.896551724137931,
"grad_norm": 0.022168701514601707,
"learning_rate": 1.2731645278655445e-06,
"loss": 0.6539,
"step": 127
},
{
"epoch": 2.9195402298850572,
"grad_norm": 0.0227156113833189,
"learning_rate": 8.847168995992916e-07,
"loss": 0.6673,
"step": 128
},
{
"epoch": 2.942528735632184,
"grad_norm": 0.022653989493846893,
"learning_rate": 5.665199789862907e-07,
"loss": 0.6633,
"step": 129
},
{
"epoch": 2.9655172413793105,
"grad_norm": 0.022713936865329742,
"learning_rate": 3.1879929692498757e-07,
"loss": 0.6492,
"step": 130
},
{
"epoch": 2.9655172413793105,
"eval_loss": 0.661920964717865,
"eval_runtime": 301.3689,
"eval_samples_per_second": 32.843,
"eval_steps_per_second": 0.129,
"step": 130
},
{
"epoch": 2.9885057471264367,
"grad_norm": 0.0218950342386961,
"learning_rate": 1.4173043232380557e-07,
"loss": 0.6681,
"step": 131
},
{
"epoch": 3.0,
"grad_norm": 0.033240437507629395,
"learning_rate": 3.5438887654737355e-08,
"loss": 0.6613,
"step": 132
},
{
"epoch": 3.0,
"step": 132,
"total_flos": 4.878497440777581e+19,
"train_loss": 0.7089937537005453,
"train_runtime": 30749.2873,
"train_samples_per_second": 8.691,
"train_steps_per_second": 0.004
}
],
"logging_steps": 1.0,
"max_steps": 132,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.878497440777581e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}