|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 2.0, |
|
"eval_steps": 500, |
|
"global_step": 639, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.003129890453834116, |
|
"grad_norm": null,
|
"learning_rate": 1e-05, |
|
"loss": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.006259780907668232, |
|
"grad_norm": null,
|
"learning_rate": 2e-05, |
|
"loss": 0.0, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.009389671361502348, |
|
"grad_norm": null,
|
"learning_rate": 3e-05, |
|
"loss": 0.0, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.012519561815336464, |
|
"grad_norm": null,
|
"learning_rate": 4e-05, |
|
"loss": 0.0, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.01564945226917058, |
|
"grad_norm": null,
|
"learning_rate": 5e-05, |
|
"loss": 0.0, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.018779342723004695, |
|
"grad_norm": null,
|
"learning_rate": 6e-05, |
|
"loss": 0.0, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.02190923317683881, |
|
"grad_norm": null,
|
"learning_rate": 7e-05, |
|
"loss": 0.0, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.025039123630672927, |
|
"grad_norm": null,
|
"learning_rate": 8e-05, |
|
"loss": 0.0, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.028169014084507043, |
|
"grad_norm": null,
|
"learning_rate": 9e-05, |
|
"loss": 0.0, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.03129890453834116, |
|
"grad_norm": null,
|
"learning_rate": 0.0001, |
|
"loss": 0.0, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.03442879499217527, |
|
"grad_norm": null,
|
"learning_rate": 9.999972486908167e-05, |
|
"loss": 0.0, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.03755868544600939, |
|
"grad_norm": null,
|
"learning_rate": 9.999889947935455e-05, |
|
"loss": 0.0, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.0406885758998435, |
|
"grad_norm": null,
|
"learning_rate": 9.999752383990224e-05, |
|
"loss": 0.0, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.04381846635367762, |
|
"grad_norm": null,
|
"learning_rate": 9.999559796586399e-05, |
|
"loss": 0.0, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.046948356807511735, |
|
"grad_norm": null,
|
"learning_rate": 9.99931218784345e-05, |
|
"loss": 0.0, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.050078247261345854, |
|
"grad_norm": null,
|
"learning_rate": 9.99900956048637e-05, |
|
"loss": 0.0, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.053208137715179966, |
|
"grad_norm": null,
|
"learning_rate": 9.998651917845645e-05, |
|
"loss": 0.0, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.056338028169014086, |
|
"grad_norm": null,
|
"learning_rate": 9.998239263857216e-05, |
|
"loss": 0.0, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.0594679186228482, |
|
"grad_norm": null,
|
"learning_rate": 9.997771603062438e-05, |
|
"loss": 0.0, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.06259780907668232, |
|
"grad_norm": null,
|
"learning_rate": 9.997248940608029e-05, |
|
"loss": 0.0, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.06572769953051644, |
|
"grad_norm": null,
|
"learning_rate": 9.99667128224601e-05, |
|
"loss": 0.0, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.06885758998435054, |
|
"grad_norm": null,
|
"learning_rate": 9.996038634333654e-05, |
|
"loss": 0.0, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.07198748043818466, |
|
"grad_norm": null,
|
"learning_rate": 9.995351003833398e-05, |
|
"loss": 0.0, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.07511737089201878, |
|
"grad_norm": null,
|
"learning_rate": 9.994608398312777e-05, |
|
"loss": 0.0, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.0782472613458529, |
|
"grad_norm": null,
|
"learning_rate": 9.993810825944343e-05, |
|
"loss": 0.0, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.081377151799687, |
|
"grad_norm": null,
|
"learning_rate": 9.992958295505567e-05, |
|
"loss": 0.0, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.08450704225352113, |
|
"grad_norm": null,
|
"learning_rate": 9.992050816378749e-05, |
|
"loss": 0.0, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.08763693270735524, |
|
"grad_norm": null,
|
"learning_rate": 9.991088398550913e-05, |
|
"loss": 0.0, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.09076682316118936, |
|
"grad_norm": null,
|
"learning_rate": 9.990071052613693e-05, |
|
"loss": 0.0, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.09389671361502347, |
|
"grad_norm": null,
|
"learning_rate": 9.988998789763222e-05, |
|
"loss": 0.0, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.09702660406885759, |
|
"grad_norm": null,
|
"learning_rate": 9.987871621800006e-05, |
|
"loss": 0.0, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.10015649452269171, |
|
"grad_norm": null,
|
"learning_rate": 9.986689561128798e-05, |
|
"loss": 0.0, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.10328638497652583, |
|
"grad_norm": null,
|
"learning_rate": 9.985452620758453e-05, |
|
"loss": 0.0, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.10641627543035993, |
|
"grad_norm": null,
|
"learning_rate": 9.984160814301794e-05, |
|
"loss": 0.0, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.10954616588419405, |
|
"grad_norm": null,
|
"learning_rate": 9.982814155975455e-05, |
|
"loss": 0.0, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.11267605633802817, |
|
"grad_norm": null,
|
"learning_rate": 9.981412660599732e-05, |
|
"loss": 0.0, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.11580594679186229, |
|
"grad_norm": null,
|
"learning_rate": 9.979956343598413e-05, |
|
"loss": 0.0, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.1189358372456964, |
|
"grad_norm": null,
|
"learning_rate": 9.97844522099861e-05, |
|
"loss": 0.0, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.12206572769953052, |
|
"grad_norm": null,
|
"learning_rate": 9.976879309430586e-05, |
|
"loss": 0.0, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.12519561815336464, |
|
"grad_norm": null,
|
"learning_rate": 9.975258626127568e-05, |
|
"loss": 0.0, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.12832550860719874, |
|
"grad_norm": null,
|
"learning_rate": 9.97358318892556e-05, |
|
"loss": 0.0, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.13145539906103287, |
|
"grad_norm": null,
|
"learning_rate": 9.971853016263143e-05, |
|
"loss": 0.0, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.13458528951486698, |
|
"grad_norm": null,
|
"learning_rate": 9.97006812718128e-05, |
|
"loss": 0.0, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.13771517996870108, |
|
"grad_norm": null,
|
"learning_rate": 9.968228541323094e-05, |
|
"loss": 0.0, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.14084507042253522, |
|
"grad_norm": null,
|
"learning_rate": 9.96633427893367e-05, |
|
"loss": 0.0, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.14397496087636932, |
|
"grad_norm": null,
|
"learning_rate": 9.964385360859805e-05, |
|
"loss": 0.0, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.14710485133020346, |
|
"grad_norm": null,
|
"learning_rate": 9.962381808549807e-05, |
|
"loss": 0.0, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.15023474178403756, |
|
"grad_norm": null,
|
"learning_rate": 9.960323644053248e-05, |
|
"loss": 0.0, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.15336463223787167, |
|
"grad_norm": null,
|
"learning_rate": 9.95821089002071e-05, |
|
"loss": 0.0, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.1564945226917058, |
|
"grad_norm": null,
|
"learning_rate": 9.956043569703553e-05, |
|
"loss": 0.0, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.1596244131455399, |
|
"grad_norm": null,
|
"learning_rate": 9.953821706953651e-05, |
|
"loss": 0.0, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.162754303599374, |
|
"grad_norm": null,
|
"learning_rate": 9.951545326223128e-05, |
|
"loss": 0.0, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.16588419405320814, |
|
"grad_norm": null,
|
"learning_rate": 9.949214452564096e-05, |
|
"loss": 0.0, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.16901408450704225, |
|
"grad_norm": null,
|
"learning_rate": 9.946829111628367e-05, |
|
"loss": 0.0, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.17214397496087636, |
|
"grad_norm": null,
|
"learning_rate": 9.944389329667187e-05, |
|
"loss": 0.0, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.1752738654147105, |
|
"grad_norm": null,
|
"learning_rate": 9.941895133530929e-05, |
|
"loss": 0.0, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.1784037558685446, |
|
"grad_norm": null,
|
"learning_rate": 9.939346550668817e-05, |
|
"loss": 0.0, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.18153364632237873, |
|
"grad_norm": null,
|
"learning_rate": 9.936743609128607e-05, |
|
"loss": 0.0, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.18466353677621283, |
|
"grad_norm": null,
|
"learning_rate": 9.934086337556285e-05, |
|
"loss": 0.0, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.18779342723004694, |
|
"grad_norm": null,
|
"learning_rate": 9.931374765195756e-05, |
|
"loss": 0.0, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.19092331768388107, |
|
"grad_norm": null,
|
"learning_rate": 9.928608921888514e-05, |
|
"loss": 0.0, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.19405320813771518, |
|
"grad_norm": null,
|
"learning_rate": 9.925788838073322e-05, |
|
"loss": 0.0, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.19718309859154928, |
|
"grad_norm": null,
|
"learning_rate": 9.922914544785867e-05, |
|
"loss": 0.0, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.20031298904538342, |
|
"grad_norm": null,
|
"learning_rate": 9.919986073658429e-05, |
|
"loss": 0.0, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.20344287949921752, |
|
"grad_norm": null,
|
"learning_rate": 9.917003456919524e-05, |
|
"loss": 0.0, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.20657276995305165, |
|
"grad_norm": null,
|
"learning_rate": 9.913966727393558e-05, |
|
"loss": 0.0, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.20970266040688576, |
|
"grad_norm": null,
|
"learning_rate": 9.910875918500456e-05, |
|
"loss": 0.0, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.21283255086071987, |
|
"grad_norm": null,
|
"learning_rate": 9.907731064255304e-05, |
|
"loss": 0.0, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.215962441314554, |
|
"grad_norm": null,
|
"learning_rate": 9.904532199267966e-05, |
|
"loss": 0.0, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.2190923317683881, |
|
"grad_norm": null,
|
"learning_rate": 9.901279358742706e-05, |
|
"loss": 0.0, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.2222222222222222, |
|
"grad_norm": null,
|
"learning_rate": 9.897972578477809e-05, |
|
"loss": 0.0, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.22535211267605634, |
|
"grad_norm": null,
|
"learning_rate": 9.894611894865171e-05, |
|
"loss": 0.0, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.22848200312989045, |
|
"grad_norm": null,
|
"learning_rate": 9.891197344889913e-05, |
|
"loss": 0.0, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.23161189358372458, |
|
"grad_norm": null,
|
"learning_rate": 9.887728966129963e-05, |
|
"loss": 0.0, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.2347417840375587, |
|
"grad_norm": null,
|
"learning_rate": 9.884206796755654e-05, |
|
"loss": 0.0, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.2378716744913928, |
|
"grad_norm": null,
|
"learning_rate": 9.880630875529291e-05, |
|
"loss": 0.0, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.24100156494522693, |
|
"grad_norm": null,
|
"learning_rate": 9.877001241804735e-05, |
|
"loss": 0.0, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.24413145539906103, |
|
"grad_norm": null,
|
"learning_rate": 9.873317935526963e-05, |
|
"loss": 0.0, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.24726134585289514, |
|
"grad_norm": null,
|
"learning_rate": 9.869580997231634e-05, |
|
"loss": 0.0, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.25039123630672927, |
|
"grad_norm": null,
|
"learning_rate": 9.865790468044637e-05, |
|
"loss": 0.0, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.2535211267605634, |
|
"grad_norm": null,
|
"learning_rate": 9.861946389681645e-05, |
|
"loss": 0.0, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.2566510172143975, |
|
"grad_norm": null,
|
"learning_rate": 9.85804880444765e-05, |
|
"loss": 0.0, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.2597809076682316, |
|
"grad_norm": null,
|
"learning_rate": 9.854097755236499e-05, |
|
"loss": 0.0, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.26291079812206575, |
|
"grad_norm": null,
|
"learning_rate": 9.850093285530424e-05, |
|
"loss": 0.0, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.26604068857589985, |
|
"grad_norm": null,
|
"learning_rate": 9.846035439399564e-05, |
|
"loss": 0.0, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.26917057902973396, |
|
"grad_norm": null,
|
"learning_rate": 9.841924261501474e-05, |
|
"loss": 0.0, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.27230046948356806, |
|
"grad_norm": null,
|
"learning_rate": 9.837759797080641e-05, |
|
"loss": 0.0, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.27543035993740217, |
|
"grad_norm": null,
|
"learning_rate": 9.833542091967982e-05, |
|
"loss": 0.0, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.27856025039123633, |
|
"grad_norm": null,
|
"learning_rate": 9.82927119258034e-05, |
|
"loss": 0.0, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.28169014084507044, |
|
"grad_norm": null,
|
"learning_rate": 9.824947145919974e-05, |
|
"loss": 0.0, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.28482003129890454, |
|
"grad_norm": null,
|
"learning_rate": 9.820569999574041e-05, |
|
"loss": 0.0, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.28794992175273865, |
|
"grad_norm": null,
|
"learning_rate": 9.816139801714073e-05, |
|
"loss": 0.0, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.29107981220657275, |
|
"grad_norm": null,
|
"learning_rate": 9.811656601095446e-05, |
|
"loss": 0.0, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.2942097026604069, |
|
"grad_norm": null,
|
"learning_rate": 9.807120447056844e-05, |
|
"loss": 0.0, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.297339593114241, |
|
"grad_norm": null,
|
"learning_rate": 9.802531389519716e-05, |
|
"loss": 0.0, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.3004694835680751, |
|
"grad_norm": null,
|
"learning_rate": 9.797889478987727e-05, |
|
"loss": 0.0, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.30359937402190923, |
|
"grad_norm": null,
|
"learning_rate": 9.793194766546201e-05, |
|
"loss": 0.0, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.30672926447574334, |
|
"grad_norm": null,
|
"learning_rate": 9.78844730386156e-05, |
|
"loss": 0.0, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.30985915492957744, |
|
"grad_norm": null,
|
"learning_rate": 9.783647143180754e-05, |
|
"loss": 0.0, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.3129890453834116, |
|
"grad_norm": null,
|
"learning_rate": 9.77879433733069e-05, |
|
"loss": 0.0, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.3161189358372457, |
|
"grad_norm": null,
|
"learning_rate": 9.773888939717641e-05, |
|
"loss": 0.0, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.3192488262910798, |
|
"grad_norm": null,
|
"learning_rate": 9.768931004326674e-05, |
|
"loss": 0.0, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.3223787167449139, |
|
"grad_norm": null,
|
"learning_rate": 9.763920585721037e-05, |
|
"loss": 0.0, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.325508607198748, |
|
"grad_norm": null,
|
"learning_rate": 9.758857739041575e-05, |
|
"loss": 0.0, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.3286384976525822, |
|
"grad_norm": null,
|
"learning_rate": 9.753742520006117e-05, |
|
"loss": 0.0, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.3317683881064163, |
|
"grad_norm": null,
|
"learning_rate": 9.748574984908854e-05, |
|
"loss": 0.0, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.3348982785602504, |
|
"grad_norm": null,
|
"learning_rate": 9.743355190619737e-05, |
|
"loss": 0.0, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.3380281690140845, |
|
"grad_norm": null,
|
"learning_rate": 9.738083194583836e-05, |
|
"loss": 0.0, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.3411580594679186, |
|
"grad_norm": null,
|
"learning_rate": 9.732759054820718e-05, |
|
"loss": 0.0, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.3442879499217527, |
|
"grad_norm": null,
|
"learning_rate": 9.727382829923797e-05, |
|
"loss": 0.0, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.3474178403755869, |
|
"grad_norm": null,
|
"learning_rate": 9.721954579059705e-05, |
|
"loss": 0.0, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.350547730829421, |
|
"grad_norm": null,
|
"learning_rate": 9.716474361967625e-05, |
|
"loss": 0.0, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.3536776212832551, |
|
"grad_norm": null,
|
"learning_rate": 9.710942238958645e-05, |
|
"loss": 0.0, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.3568075117370892, |
|
"grad_norm": null,
|
"learning_rate": 9.705358270915086e-05, |
|
"loss": 0.0, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.3599374021909233, |
|
"grad_norm": null,
|
"learning_rate": 9.699722519289843e-05, |
|
"loss": 0.0, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.36306729264475746, |
|
"grad_norm": null,
|
"learning_rate": 9.694035046105693e-05, |
|
"loss": 0.0, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.36619718309859156, |
|
"grad_norm": null,
|
"learning_rate": 9.688295913954625e-05, |
|
"loss": 0.0, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.36932707355242567, |
|
"grad_norm": null,
|
"learning_rate": 9.68250518599715e-05, |
|
"loss": 0.0, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.37245696400625977, |
|
"grad_norm": null,
|
"learning_rate": 9.676662925961595e-05, |
|
"loss": 0.0, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.3755868544600939, |
|
"grad_norm": null,
|
"learning_rate": 9.670769198143418e-05, |
|
"loss": 0.0, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.37871674491392804, |
|
"grad_norm": null,
|
"learning_rate": 9.66482406740449e-05, |
|
"loss": 0.0, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.38184663536776214, |
|
"grad_norm": null,
|
"learning_rate": 9.65882759917238e-05, |
|
"loss": 0.0, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.38497652582159625, |
|
"grad_norm": null,
|
"learning_rate": 9.65277985943964e-05, |
|
"loss": 0.0, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.38810641627543035, |
|
"grad_norm": null,
|
"learning_rate": 9.64668091476308e-05, |
|
"loss": 0.0, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.39123630672926446, |
|
"grad_norm": null,
|
"learning_rate": 9.640530832263027e-05, |
|
"loss": 0.0, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.39436619718309857, |
|
"grad_norm": null,
|
"learning_rate": 9.634329679622598e-05, |
|
"loss": 0.0, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.3974960876369327, |
|
"grad_norm": null,
|
"learning_rate": 9.628077525086942e-05, |
|
"loss": 0.0, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.40062597809076683, |
|
"grad_norm": null,
|
"learning_rate": 9.621774437462503e-05, |
|
"loss": 0.0, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.40375586854460094, |
|
"grad_norm": null,
|
"learning_rate": 9.615420486116251e-05, |
|
"loss": 0.0, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.40688575899843504, |
|
"grad_norm": null,
|
"learning_rate": 9.609015740974925e-05, |
|
"loss": 0.0, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.41001564945226915, |
|
"grad_norm": null,
|
"learning_rate": 9.602560272524263e-05, |
|
"loss": 0.0, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.4131455399061033, |
|
"grad_norm": null,
|
"learning_rate": 9.596054151808222e-05, |
|
"loss": 0.0, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.4162754303599374, |
|
"grad_norm": null,
|
"learning_rate": 9.5894974504282e-05, |
|
"loss": 0.0, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.4194053208137715, |
|
"grad_norm": null,
|
"learning_rate": 9.582890240542249e-05, |
|
"loss": 0.0, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.4225352112676056, |
|
"grad_norm": null,
|
"learning_rate": 9.576232594864277e-05, |
|
"loss": 0.0, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.42566510172143973, |
|
"grad_norm": null,
|
"learning_rate": 9.569524586663253e-05, |
|
"loss": 0.0, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.4287949921752739, |
|
"grad_norm": null,
|
"learning_rate": 9.562766289762392e-05, |
|
"loss": 0.0, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.431924882629108, |
|
"grad_norm": null,
|
"learning_rate": 9.555957778538354e-05, |
|
"loss": 0.0, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.4350547730829421, |
|
"grad_norm": null,
|
"learning_rate": 9.549099127920414e-05, |
|
"loss": 0.0, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.4381846635367762, |
|
"grad_norm": null,
|
"learning_rate": 9.542190413389649e-05, |
|
"loss": 0.0, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.4413145539906103, |
|
"grad_norm": null,
|
"learning_rate": 9.535231710978097e-05, |
|
"loss": 0.0, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.4444444444444444, |
|
"grad_norm": null,
|
"learning_rate": 9.528223097267924e-05, |
|
"loss": 0.0, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.4475743348982786, |
|
"grad_norm": null,
|
"learning_rate": 9.521164649390585e-05, |
|
"loss": 0.0, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.4507042253521127, |
|
"grad_norm": null,
|
"learning_rate": 9.514056445025967e-05, |
|
"loss": 0.0, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.4538341158059468, |
|
"grad_norm": null,
|
"learning_rate": 9.506898562401545e-05, |
|
"loss": 0.0, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.4569640062597809, |
|
"grad_norm": null,
|
"learning_rate": 9.499691080291511e-05, |
|
"loss": 0.0, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.460093896713615, |
|
"grad_norm": null,
|
"learning_rate": 9.492434078015911e-05, |
|
"loss": 0.0, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.46322378716744916, |
|
"grad_norm": null,
|
"learning_rate": 9.485127635439772e-05, |
|
"loss": 0.0, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.46635367762128327, |
|
"grad_norm": null,
|
"learning_rate": 9.477771832972226e-05, |
|
"loss": 0.0, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.4694835680751174, |
|
"grad_norm": null,
|
"learning_rate": 9.47036675156562e-05, |
|
"loss": 0.0, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.4726134585289515, |
|
"grad_norm": null,
|
"learning_rate": 9.462912472714627e-05, |
|
"loss": 0.0, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.4757433489827856, |
|
"grad_norm": null,
|
"learning_rate": 9.455409078455353e-05, |
|
"loss": 0.0, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.4788732394366197, |
|
"grad_norm": null,
|
"learning_rate": 9.447856651364426e-05, |
|
"loss": 0.0, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.48200312989045385, |
|
"grad_norm": null,
|
"learning_rate": 9.440255274558094e-05, |
|
"loss": 0.0, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.48513302034428796, |
|
"grad_norm": null,
|
"learning_rate": 9.432605031691309e-05, |
|
"loss": 0.0, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.48826291079812206, |
|
"grad_norm": null,
|
"learning_rate": 9.424906006956805e-05, |
|
"loss": 0.0, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.49139280125195617, |
|
"grad_norm": null,
|
"learning_rate": 9.41715828508417e-05, |
|
"loss": 0.0, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.4945226917057903, |
|
"grad_norm": null,
|
"learning_rate": 9.40936195133892e-05, |
|
"loss": 0.0, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.49765258215962443, |
|
"grad_norm": null,
|
"learning_rate": 9.401517091521553e-05, |
|
"loss": 0.0, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.5007824726134585, |
|
"grad_norm": null,
|
"learning_rate": 9.393623791966607e-05, |
|
"loss": 0.0, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.5039123630672926, |
|
"grad_norm": null,
|
"learning_rate": 9.385682139541713e-05, |
|
"loss": 0.0, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.5070422535211268, |
|
"grad_norm": null,
|
"learning_rate": 9.377692221646635e-05, |
|
"loss": 0.0, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.5101721439749609, |
|
"grad_norm": null,
|
"learning_rate": 9.369654126212313e-05, |
|
"loss": 0.0, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.513302034428795, |
|
"grad_norm": null,
|
"learning_rate": 9.361567941699889e-05, |
|
"loss": 0.0, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.5164319248826291, |
|
"grad_norm": null,
|
"learning_rate": 9.353433757099736e-05, |
|
"loss": 0.0, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.5195618153364632, |
|
"grad_norm": null,
|
"learning_rate": 9.345251661930486e-05, |
|
"loss": 0.0, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.5226917057902973, |
|
"grad_norm": null,
|
"learning_rate": 9.337021746238028e-05, |
|
"loss": 0.0, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.5258215962441315, |
|
"grad_norm": null,
|
"learning_rate": 9.328744100594535e-05, |
|
"loss": 0.0, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.5289514866979655, |
|
"grad_norm": null,
|
"learning_rate": 9.320418816097456e-05, |
|
"loss": 0.0, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.5320813771517997, |
|
"grad_norm": null,
|
"learning_rate": 9.31204598436852e-05, |
|
"loss": 0.0, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.5352112676056338, |
|
"grad_norm": null,
|
"learning_rate": 9.303625697552721e-05, |
|
"loss": 0.0, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.5383411580594679, |
|
"grad_norm": null,
|
"learning_rate": 9.295158048317307e-05, |
|
"loss": 0.0, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.5414710485133021, |
|
"grad_norm": null,
|
"learning_rate": 9.286643129850765e-05, |
|
"loss": 0.0, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.5446009389671361, |
|
"grad_norm": null,
|
"learning_rate": 9.278081035861787e-05, |
|
"loss": 0.0, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.5477308294209703, |
|
"grad_norm": null,
|
"learning_rate": 9.269471860578245e-05, |
|
"loss": 0.0, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.5508607198748043, |
|
"grad_norm": null,
|
"learning_rate": 9.26081569874615e-05, |
|
"loss": 0.0, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.5539906103286385, |
|
"grad_norm": null,
|
"learning_rate": 9.252112645628615e-05, |
|
"loss": 0.0, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.5571205007824727, |
|
"grad_norm": null,
|
"learning_rate": 9.243362797004795e-05, |
|
"loss": 0.0, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.5602503912363067, |
|
"grad_norm": null,
|
"learning_rate": 9.23456624916885e-05, |
|
"loss": 0.0, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.5633802816901409, |
|
"grad_norm": null,
|
"learning_rate": 9.225723098928869e-05, |
|
"loss": 0.0, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.5665101721439749, |
|
"grad_norm": null,
|
"learning_rate": 9.216833443605814e-05, |
|
"loss": 0.0, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.5696400625978091, |
|
"grad_norm": null,
|
"learning_rate": 9.207897381032449e-05, |
|
"loss": 0.0, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.5727699530516432, |
|
"grad_norm": null,
|
"learning_rate": 9.198915009552253e-05, |
|
"loss": 0.0, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.5758998435054773, |
|
"grad_norm": null,
|
"learning_rate": 9.189886428018355e-05, |
|
"loss": 0.0, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.5790297339593115, |
|
"grad_norm": null,
|
"learning_rate": 9.180811735792431e-05, |
|
"loss": 0.0, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.5821596244131455, |
|
"grad_norm": null,
|
"learning_rate": 9.171691032743615e-05, |
|
"loss": 0.0, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.5852895148669797, |
|
"grad_norm": null,
|
"learning_rate": 9.162524419247407e-05, |
|
"loss": 0.0, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.5884194053208138, |
|
"grad_norm": null,
|
"learning_rate": 9.153311996184557e-05, |
|
"loss": 0.0, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.5915492957746479, |
|
"grad_norm": null,
|
"learning_rate": 9.144053864939958e-05, |
|
"loss": 0.0, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.594679186228482, |
|
"grad_norm": null,
|
"learning_rate": 9.134750127401544e-05, |
|
"loss": 0.0, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.5978090766823161, |
|
"grad_norm": null,
|
"learning_rate": 9.125400885959141e-05, |
|
"loss": 0.0, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.6009389671361502, |
|
"grad_norm": null,
|
"learning_rate": 9.11600624350337e-05, |
|
"loss": 0.0, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.6040688575899843, |
|
"grad_norm": null,
|
"learning_rate": 9.106566303424492e-05, |
|
"loss": 0.0, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.6071987480438185, |
|
"grad_norm": null,
|
"learning_rate": 9.097081169611283e-05, |
|
"loss": 0.0, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.6103286384976526, |
|
"grad_norm": null,
|
"learning_rate": 9.087550946449888e-05, |
|
"loss": 0.0, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.6134585289514867, |
|
"grad_norm": null,
|
"learning_rate": 9.077975738822666e-05, |
|
"loss": 0.0, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.6165884194053208, |
|
"grad_norm": null,
|
"learning_rate": 9.068355652107045e-05, |
|
"loss": 0.0, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.6197183098591549, |
|
"grad_norm": null,
|
"learning_rate": 9.058690792174358e-05, |
|
"loss": 0.0, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.622848200312989, |
|
"grad_norm": null,
|
"learning_rate": 9.048981265388676e-05, |
|
"loss": 0.0, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.6259780907668232, |
|
"grad_norm": null,
|
"learning_rate": 9.039227178605639e-05, |
|
"loss": 0.0, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.6291079812206573, |
|
"grad_norm": null,
|
"learning_rate": 9.029428639171281e-05, |
|
"loss": 0.0, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.6322378716744914, |
|
"grad_norm": null,
|
"learning_rate": 9.019585754920847e-05, |
|
"loss": 0.0, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.6353677621283255, |
|
"grad_norm": null,
|
"learning_rate": 9.009698634177613e-05, |
|
"loss": 0.0, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.6384976525821596, |
|
"grad_norm": null,
|
"learning_rate": 8.999767385751678e-05, |
|
"loss": 0.0, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.6416275430359938, |
|
"grad_norm": null,
|
"learning_rate": 8.989792118938784e-05, |
|
"loss": 0.0, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.6447574334898278, |
|
"grad_norm": null,
|
"learning_rate": 8.979772943519106e-05, |
|
"loss": 0.0, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.647887323943662, |
|
"grad_norm": null,
|
"learning_rate": 8.96970996975604e-05, |
|
"loss": 0.0, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.651017214397496, |
|
"grad_norm": null,
|
"learning_rate": 8.959603308394991e-05, |
|
"loss": 0.0, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.6541471048513302, |
|
"grad_norm": null,
|
"learning_rate": 8.949453070662165e-05, |
|
"loss": 0.0, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.6572769953051644, |
|
"grad_norm": null,
|
"learning_rate": 8.939259368263329e-05, |
|
"loss": 0.0, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.6604068857589984, |
|
"grad_norm": null,
|
"learning_rate": 8.929022313382589e-05, |
|
"loss": 0.0, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.6635367762128326, |
|
"grad_norm": null,
|
"learning_rate": 8.918742018681161e-05, |
|
"loss": 0.0, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.6666666666666666, |
|
"grad_norm": null,
|
"learning_rate": 8.90841859729612e-05, |
|
"loss": 0.0, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.6697965571205008, |
|
"grad_norm": null,
|
"learning_rate": 8.898052162839162e-05, |
|
"loss": 0.0, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.672926447574335, |
|
"grad_norm": null,
|
"learning_rate": 8.887642829395353e-05, |
|
"loss": 0.0, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.676056338028169, |
|
"grad_norm": null,
|
"learning_rate": 8.877190711521872e-05, |
|
"loss": 0.0, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.6791862284820032, |
|
"grad_norm": null,
|
"learning_rate": 8.86669592424675e-05, |
|
"loss": 0.0, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.6823161189358372, |
|
"grad_norm": null,
|
"learning_rate": 8.856158583067607e-05, |
|
"loss": 0.0, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.6854460093896714, |
|
"grad_norm": null,
|
"learning_rate": 8.845578803950373e-05, |
|
"loss": 0.0, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.6885758998435054, |
|
"grad_norm": null,
|
"learning_rate": 8.834956703328026e-05, |
|
"loss": 0.0, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.6917057902973396, |
|
"grad_norm": null,
|
"learning_rate": 8.824292398099298e-05, |
|
"loss": 0.0, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.6948356807511737, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.813586005627389e-05, |
|
"loss": 0.0, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.6979655712050078, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.802837643738686e-05, |
|
"loss": 0.0, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.701095461658842, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.792047430721456e-05, |
|
"loss": 0.0, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.704225352112676, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.781215485324544e-05, |
|
"loss": 0.0, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.7073552425665102, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.770341926756078e-05, |
|
"loss": 0.0, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.7104851330203443, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.759426874682142e-05, |
|
"loss": 0.0, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.7136150234741784, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.748470449225467e-05, |
|
"loss": 0.0, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.7167449139280125, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.737472770964112e-05, |
|
"loss": 0.0, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.7198748043818466, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.726433960930126e-05, |
|
"loss": 0.0, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.7230046948356808, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.715354140608229e-05, |
|
"loss": 0.0, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.7261345852895149, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.704233431934468e-05, |
|
"loss": 0.0, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.729264475743349, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.693071957294871e-05, |
|
"loss": 0.0, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.7323943661971831, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.68186983952411e-05, |
|
"loss": 0.0, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.7355242566510172, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.670627201904144e-05, |
|
"loss": 0.0, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.7386541471048513, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.659344168162861e-05, |
|
"loss": 0.0, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.7417840375586855, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.64802086247272e-05, |
|
"loss": 0.0, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.7449139280125195, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.636657409449378e-05, |
|
"loss": 0.0, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.7480438184663537, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.625253934150328e-05, |
|
"loss": 0.0, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.7511737089201878, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.613810562073512e-05, |
|
"loss": 0.0, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.7543035993740219, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.602327419155951e-05, |
|
"loss": 0.0, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.7574334898278561, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.590804631772352e-05, |
|
"loss": 0.0, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.7605633802816901, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.579242326733716e-05, |
|
"loss": 0.0, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.7636932707355243, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.56764063128595e-05, |
|
"loss": 0.0, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.7668231611893583, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.555999673108455e-05, |
|
"loss": 0.0, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.7699530516431925, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.544319580312734e-05, |
|
"loss": 0.0, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.7730829420970265, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.532600481440974e-05, |
|
"loss": 0.0, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.7762128325508607, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.52084250546463e-05, |
|
"loss": 0.0, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.7793427230046949, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.509045781783011e-05, |
|
"loss": 0.0, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.7824726134585289, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.497210440221857e-05, |
|
"loss": 0.0, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.7856025039123631, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.485336611031902e-05, |
|
"loss": 0.0, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.7887323943661971, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.473424424887447e-05, |
|
"loss": 0.0, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.7918622848200313, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.46147401288492e-05, |
|
"loss": 0.0, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.7949921752738655, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.449485506541434e-05, |
|
"loss": 0.0, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.7981220657276995, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.437459037793342e-05, |
|
"loss": 0.0, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.8012519561815337, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.425394738994776e-05, |
|
"loss": 0.0, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.8043818466353677, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.413292742916203e-05, |
|
"loss": 0.0, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.8075117370892019, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.401153182742954e-05, |
|
"loss": 0.0, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.810641627543036, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.388976192073761e-05, |
|
"loss": 0.0, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.8137715179968701, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.376761904919293e-05, |
|
"loss": 0.0, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.8169014084507042, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.364510455700666e-05, |
|
"loss": 0.0, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.8200312989045383, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.352221979247983e-05, |
|
"loss": 0.0, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.8231611893583725, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.339896610798836e-05, |
|
"loss": 0.0, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.8262910798122066, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.327534485996822e-05, |
|
"loss": 0.0, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.8294209702660407, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.315135740890052e-05, |
|
"loss": 0.0, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.8325508607198748, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.302700511929648e-05, |
|
"loss": 0.0, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.8356807511737089, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.290228935968252e-05, |
|
"loss": 0.0, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.838810641627543, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.277721150258508e-05, |
|
"loss": 0.0, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.8419405320813772, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.265177292451562e-05, |
|
"loss": 0.0, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.8450704225352113, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.252597500595534e-05, |
|
"loss": 0.0, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.8482003129890454, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.239981913134012e-05, |
|
"loss": 0.0, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.8513302034428795, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.227330668904527e-05, |
|
"loss": 0.0, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.8544600938967136, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.214643907137012e-05, |
|
"loss": 0.0, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.8575899843505478, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.201921767452286e-05, |
|
"loss": 0.0, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.8607198748043818, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.189164389860507e-05, |
|
"loss": 0.0, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.863849765258216, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.176371914759635e-05, |
|
"loss": 0.0, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.86697965571205, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.163544482933888e-05, |
|
"loss": 0.0, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.8701095461658842, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.150682235552191e-05, |
|
"loss": 0.0, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.8732394366197183, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.137785314166619e-05, |
|
"loss": 0.0, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.8763693270735524, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.124853860710846e-05, |
|
"loss": 0.0, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.8794992175273866, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.111888017498578e-05, |
|
"loss": 0.0, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.8826291079812206, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.098887927221993e-05, |
|
"loss": 0.0, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.8857589984350548, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.085853732950155e-05, |
|
"loss": 0.0, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.8888888888888888, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.072785578127462e-05, |
|
"loss": 0.0, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.892018779342723, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.059683606572053e-05, |
|
"loss": 0.0, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.8951486697965572, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.046547962474221e-05, |
|
"loss": 0.0, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.8982785602503912, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.033378790394843e-05, |
|
"loss": 0.0, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.9014084507042254, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.020176235263776e-05, |
|
"loss": 0.0, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.9045383411580594, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.006940442378264e-05, |
|
"loss": 0.0, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.9076682316118936, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.993671557401339e-05, |
|
"loss": 0.0, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.9107981220657277, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.980369726360224e-05, |
|
"loss": 0.0, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.9139280125195618, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.967035095644716e-05, |
|
"loss": 0.0, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.917057902973396, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.953667812005584e-05, |
|
"loss": 0.0, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.92018779342723, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.94026802255295e-05, |
|
"loss": 0.0, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.9233176838810642, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.926835874754668e-05, |
|
"loss": 0.0, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.9264475743348983, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.913371516434704e-05, |
|
"loss": 0.0, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.9295774647887324, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.89987509577151e-05, |
|
"loss": 0.0, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.9327073552425665, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.886346761296389e-05, |
|
"loss": 0.0, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.9358372456964006, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.872786661891866e-05, |
|
"loss": 0.0, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.9389671361502347, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.859194946790043e-05, |
|
"loss": 0.0, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.9420970266040689, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.845571765570966e-05, |
|
"loss": 0.0, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.945226917057903, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.831917268160966e-05, |
|
"loss": 0.0, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.9483568075117371, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.81823160483102e-05, |
|
"loss": 0.0, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.9514866979655712, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.804514926195093e-05, |
|
"loss": 0.0, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.9546165884194053, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.790767383208481e-05, |
|
"loss": 0.0, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.9577464788732394, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.77698912716615e-05, |
|
"loss": 0.0, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.9608763693270735, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.763180309701068e-05, |
|
"loss": 0.0, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.9640062597809077, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.74934108278254e-05, |
|
"loss": 0.0, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.9671361502347418, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.735471598714535e-05, |
|
"loss": 0.0, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.9702660406885759, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.72157201013401e-05, |
|
"loss": 0.0, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.97339593114241, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.707642470009227e-05, |
|
"loss": 0.0, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.9765258215962441, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.69368313163807e-05, |
|
"loss": 0.0, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.9796557120500783, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.679694148646366e-05, |
|
"loss": 0.0, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.9827856025039123, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.665675674986181e-05, |
|
"loss": 0.0, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.9859154929577465, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.65162786493414e-05, |
|
"loss": 0.0, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.9890453834115805, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.637550873089718e-05, |
|
"loss": 0.0, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.9921752738654147, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.623444854373539e-05, |
|
"loss": 0.0, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.9953051643192489, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.609309964025681e-05, |
|
"loss": 0.0, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.9984350547730829, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.595146357603958e-05, |
|
"loss": 0.0, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 0.9984350547730829, |
|
"eval_loss": NaN, |
|
"eval_runtime": 15.1685, |
|
"eval_samples_per_second": 35.534, |
|
"eval_steps_per_second": 8.9, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 1.001564945226917, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.580954190982213e-05, |
|
"loss": 0.0, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.0046948356807512, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.566733620348596e-05, |
|
"loss": 0.0, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 1.0078247261345852, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.552484802203858e-05, |
|
"loss": 0.0, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 1.0109546165884193, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.538207893359611e-05, |
|
"loss": 0.0, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 1.0140845070422535, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.523903050936621e-05, |
|
"loss": 0.0, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 1.0172143974960877, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.509570432363061e-05, |
|
"loss": 0.0, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 1.0203442879499218, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.495210195372795e-05, |
|
"loss": 0.0, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 1.0234741784037558, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.48082249800363e-05, |
|
"loss": 0.0, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 1.02660406885759, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.46640749859558e-05, |
|
"loss": 0.0, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 1.029733959311424, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.451965355789129e-05, |
|
"loss": 0.0, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 1.0328638497652582, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.437496228523473e-05, |
|
"loss": 0.0, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.0359937402190924, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.423000276034786e-05, |
|
"loss": 0.0, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 1.0391236306729263, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.408477657854458e-05, |
|
"loss": 0.0, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 1.0422535211267605, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.393928533807334e-05, |
|
"loss": 0.0, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 1.0453834115805947, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.379353064009976e-05, |
|
"loss": 0.0, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 1.0485133020344288, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.364751408868877e-05, |
|
"loss": 0.0, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 1.051643192488263, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.350123729078705e-05, |
|
"loss": 0.0, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 1.054773082942097, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.335470185620543e-05, |
|
"loss": 0.0, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 1.057902973395931, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.320790939760105e-05, |
|
"loss": 0.0, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 1.0610328638497653, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.306086153045965e-05, |
|
"loss": 0.0, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 1.0641627543035994, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.291355987307784e-05, |
|
"loss": 0.0, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 1.0672926447574336, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.276600604654522e-05, |
|
"loss": 0.0, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 1.0704225352112675, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.261820167472658e-05, |
|
"loss": 0.0, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 1.0735524256651017, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.247014838424404e-05, |
|
"loss": 0.0, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 1.0766823161189358, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.232184780445907e-05, |
|
"loss": 0.0, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 1.07981220657277, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.217330156745471e-05, |
|
"loss": 0.0, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 1.0829420970266042, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.202451130801742e-05, |
|
"loss": 0.0, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 1.086071987480438, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.187547866361925e-05, |
|
"loss": 0.0, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 1.0892018779342723, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.172620527439976e-05, |
|
"loss": 0.0, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 1.0923317683881064, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.157669278314789e-05, |
|
"loss": 0.0, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 1.0954616588419406, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.1426942835284e-05, |
|
"loss": 0.0, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.0985915492957747, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.127695707884176e-05, |
|
"loss": 0.0, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 1.1017214397496087, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.112673716444988e-05, |
|
"loss": 0.0, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 1.1048513302034428, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.09762847453141e-05, |
|
"loss": 0.0, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 1.107981220657277, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.082560147719893e-05, |
|
"loss": 0.0, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 1.1111111111111112, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.067468901840937e-05, |
|
"loss": 0.0, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 1.1142410015649453, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.052354902977278e-05, |
|
"loss": 0.0, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 1.1173708920187793, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.037218317462051e-05, |
|
"loss": 0.0, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 1.1205007824726134, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.022059311876962e-05, |
|
"loss": 0.0, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 1.1236306729264476, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.006878053050459e-05, |
|
"loss": 0.0, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 1.1267605633802817, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.991674708055885e-05, |
|
"loss": 0.0, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.1298904538341157, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.976449444209654e-05, |
|
"loss": 0.0, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 1.1330203442879498, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.961202429069396e-05, |
|
"loss": 0.0, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 1.136150234741784, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.945933830432127e-05, |
|
"loss": 0.0, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 1.1392801251956182, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.930643816332385e-05, |
|
"loss": 0.0, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 1.1424100156494523, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.915332555040396e-05, |
|
"loss": 0.0, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 1.1455399061032865, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.900000215060216e-05, |
|
"loss": 0.0, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 1.1486697965571204, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.884646965127876e-05, |
|
"loss": 0.0, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 1.1517996870109546, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.869272974209525e-05, |
|
"loss": 0.0, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 1.1549295774647887, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.853878411499574e-05, |
|
"loss": 0.0, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 1.158059467918623, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.83846344641883e-05, |
|
"loss": 0.0, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 1.1611893583724568, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.823028248612632e-05, |
|
"loss": 0.0, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 1.164319248826291, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.807572987948986e-05, |
|
"loss": 0.0, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 1.1674491392801252, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.792097834516697e-05, |
|
"loss": 0.0, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 1.1705790297339593, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.776602958623487e-05, |
|
"loss": 0.0, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 1.1737089201877935, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.761088530794136e-05, |
|
"loss": 0.0, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 1.1768388106416277, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.745554721768597e-05, |
|
"loss": 0.0, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 1.1799687010954616, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.730001702500113e-05, |
|
"loss": 0.0, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 1.1830985915492958, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.714429644153344e-05, |
|
"loss": 0.0, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 1.18622848200313, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.698838718102478e-05, |
|
"loss": 0.0, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 1.189358372456964, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.683229095929347e-05, |
|
"loss": 0.0, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.192488262910798, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.66760094942154e-05, |
|
"loss": 0.0, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 1.1956181533646322, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.651954450570508e-05, |
|
"loss": 0.0, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 1.1987480438184663, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.636289771569673e-05, |
|
"loss": 0.0, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 1.2018779342723005, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.620607084812538e-05, |
|
"loss": 0.0, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 1.2050078247261347, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.604906562890783e-05, |
|
"loss": 0.0, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 1.2081377151799688, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.589188378592369e-05, |
|
"loss": 0.0, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 1.2112676056338028, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.573452704899633e-05, |
|
"loss": 0.0, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 1.214397496087637, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.557699714987393e-05, |
|
"loss": 0.0, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 1.217527386541471, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.541929582221027e-05, |
|
"loss": 0.0, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 1.2206572769953052, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.526142480154584e-05, |
|
"loss": 0.0, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.2237871674491392, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.51033858252886e-05, |
|
"loss": 0.0, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 1.2269170579029733, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.494518063269486e-05, |
|
"loss": 0.0, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 1.2300469483568075, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.478681096485022e-05, |
|
"loss": 0.0, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 1.2331768388106417, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.46282785646504e-05, |
|
"loss": 0.0, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 1.2363067292644758, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.446958517678197e-05, |
|
"loss": 0.0, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 1.2394366197183098, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.431073254770323e-05, |
|
"loss": 0.0, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 1.242566510172144, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.415172242562497e-05, |
|
"loss": 0.0, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 1.245696400625978, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.399255656049125e-05, |
|
"loss": 0.0, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 1.2488262910798122, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.383323670396006e-05, |
|
"loss": 0.0, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 1.2519561815336462, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.367376460938416e-05, |
|
"loss": 0.0, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.2550860719874803, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.35141420317917e-05, |
|
"loss": 0.0, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 1.2582159624413145, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.335437072786692e-05, |
|
"loss": 0.0, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 1.2613458528951487, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.319445245593087e-05, |
|
"loss": 0.0, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 1.2644757433489828, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.303438897592197e-05, |
|
"loss": 0.0, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 1.267605633802817, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.28741820493767e-05, |
|
"loss": 0.0, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 1.2707355242566511, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.271383343941024e-05, |
|
"loss": 0.0, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 1.273865414710485, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.2553344910697e-05, |
|
"loss": 0.0, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 1.2769953051643192, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.23927182294512e-05, |
|
"loss": 0.0, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 1.2801251956181534, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.223195516340752e-05, |
|
"loss": 0.0, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 1.2832550860719873, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.207105748180158e-05, |
|
"loss": 0.0, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.2863849765258215, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.19100269553504e-05, |
|
"loss": 0.0, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 1.2895148669796557, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.174886535623307e-05, |
|
"loss": 0.0, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 1.2926447574334898, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.158757445807118e-05, |
|
"loss": 0.0, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 1.295774647887324, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.142615603590919e-05, |
|
"loss": 0.0, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 1.2989045383411582, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.126461186619508e-05, |
|
"loss": 0.0, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 1.302034428794992, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.110294372676065e-05, |
|
"loss": 0.0, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 1.3051643192488263, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.094115339680209e-05, |
|
"loss": 0.0, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 1.3082942097026604, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.077924265686027e-05, |
|
"loss": 0.0, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 1.3114241001564946, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.061721328880119e-05, |
|
"loss": 0.0, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 1.3145539906103285, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.0455067075796424e-05, |
|
"loss": 0.0, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.3176838810641627, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.029280580230342e-05, |
|
"loss": 0.0, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 1.3208137715179968, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.0130431254045904e-05, |
|
"loss": 0.0, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 1.323943661971831, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.996794521799425e-05, |
|
"loss": 0.0, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 1.3270735524256652, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.9805349482345706e-05, |
|
"loss": 0.0, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 1.3302034428794993, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.964264583650486e-05, |
|
"loss": 0.0, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 1.3333333333333333, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.947983607106385e-05, |
|
"loss": 0.0, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 1.3364632237871674, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.931692197778269e-05, |
|
"loss": 0.0, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 1.3395931142410016, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.9153905349569525e-05, |
|
"loss": 0.0, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 1.3427230046948357, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.8990787980460974e-05, |
|
"loss": 0.0, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 1.3458528951486697, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.882757166560226e-05, |
|
"loss": 0.0, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.3489827856025038, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.866425820122758e-05, |
|
"loss": 0.0, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 1.352112676056338, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.8500849384640285e-05, |
|
"loss": 0.0, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 1.3552425665101722, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.833734701419308e-05, |
|
"loss": 0.0, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 1.3583724569640063, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.817375288926825e-05, |
|
"loss": 0.0, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 1.3615023474178405, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.801006881025788e-05, |
|
"loss": 0.0, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 1.3646322378716744, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.784629657854399e-05, |
|
"loss": 0.0, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 1.3677621283255086, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.768243799647879e-05, |
|
"loss": 0.0, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 1.3708920187793427, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.7518494867364725e-05, |
|
"loss": 0.0, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 1.374021909233177, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.7354468995434794e-05, |
|
"loss": 0.0, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 1.3771517996870108, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.71903621858325e-05, |
|
"loss": 0.0, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.380281690140845, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.7026176244592155e-05, |
|
"loss": 0.0, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 1.3834115805946792, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.686191297861892e-05, |
|
"loss": 0.0, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 1.3865414710485133, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.66975741956689e-05, |
|
"loss": 0.0, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 1.3896713615023475, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.653316170432932e-05, |
|
"loss": 0.0, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 1.3928012519561817, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.636867731399856e-05, |
|
"loss": 0.0, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 1.3959311424100156, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.620412283486629e-05, |
|
"loss": 0.0, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 1.3990610328638498, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.603950007789349e-05, |
|
"loss": 0.0, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 1.402190923317684, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.5874810854792606e-05, |
|
"loss": 0.0, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 1.405320813771518, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.571005697800748e-05, |
|
"loss": 0.0, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 1.408450704225352, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.554524026069354e-05, |
|
"loss": 0.0, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.4115805946791862, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.5380362516697794e-05, |
|
"loss": 0.0, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 1.4147104851330203, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.521542556053885e-05, |
|
"loss": 0.0, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 1.4178403755868545, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.505043120738693e-05, |
|
"loss": 0.0, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 1.4209702660406887, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.488538127304399e-05, |
|
"loss": 0.0, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 1.4241001564945228, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.4720277573923595e-05, |
|
"loss": 0.0, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 1.4272300469483568, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.455512192703105e-05, |
|
"loss": 0.0, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 1.430359937402191, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.4389916149943354e-05, |
|
"loss": 0.0, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 1.433489827856025, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.422466206078919e-05, |
|
"loss": 0.0, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 1.436619718309859, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.405936147822892e-05, |
|
"loss": 0.0, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 1.4397496087636932, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.3894016221434605e-05, |
|
"loss": 0.0, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.4428794992175273, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.372862811006992e-05, |
|
"loss": 0.0, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 1.4460093896713615, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.35631989642702e-05, |
|
"loss": 0.0, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 1.4491392801251957, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.3397730604622344e-05, |
|
"loss": 0.0, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 1.4522691705790298, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.323222485214484e-05, |
|
"loss": 0.0, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 1.455399061032864, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.306668352826765e-05, |
|
"loss": 0.0, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 1.458528951486698, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.290110845481224e-05, |
|
"loss": 0.0, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 1.461658841940532, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.2735501453971516e-05, |
|
"loss": 0.0, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 1.4647887323943662, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.256986434828969e-05, |
|
"loss": 0.0, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 1.4679186228482002, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.240419896064235e-05, |
|
"loss": 0.0, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 1.4710485133020343, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.2238507114216285e-05, |
|
"loss": 0.0, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.4741784037558685, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.2072790632489497e-05, |
|
"loss": 0.0, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 1.4773082942097027, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.19070513392111e-05, |
|
"loss": 0.0, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 1.4804381846635368, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.174129105838127e-05, |
|
"loss": 0.0, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 1.483568075117371, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.1575511614231096e-05, |
|
"loss": 0.0, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 1.486697965571205, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.140971483120265e-05, |
|
"loss": 0.0, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 1.489827856025039, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.1243902533928754e-05, |
|
"loss": 0.0, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 1.4929577464788732, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.1078076547212994e-05, |
|
"loss": 0.0, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 1.4960876369327074, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.0912238696009616e-05, |
|
"loss": 0.0, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 1.4992175273865413, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.0746390805403445e-05, |
|
"loss": 0.0, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 1.5023474178403755, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.058053470058974e-05, |
|
"loss": 0.0, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.5054773082942097, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.041467220685424e-05, |
|
"loss": 0.0, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 1.5086071987480438, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.024880514955292e-05, |
|
"loss": 0.0, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 1.511737089201878, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.0082935354092044e-05, |
|
"loss": 0.0, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 1.5148669796557122, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.991706464590797e-05, |
|
"loss": 0.0, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 1.5179968701095463, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.975119485044709e-05, |
|
"loss": 0.0, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 1.5211267605633803, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.958532779314578e-05, |
|
"loss": 0.0, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 1.5242566510172144, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.9419465299410264e-05, |
|
"loss": 0.0, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 1.5273865414710484, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.9253609194596574e-05, |
|
"loss": 0.0, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 1.5305164319248825, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.908776130399039e-05, |
|
"loss": 0.0, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 1.5336463223787167, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.8921923452787025e-05, |
|
"loss": 0.0, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.5367762128325508, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.875609746607127e-05, |
|
"loss": 0.0, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 1.539906103286385, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.859028516879737e-05, |
|
"loss": 0.0, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 1.5430359937402192, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.842448838576891e-05, |
|
"loss": 0.0, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 1.5461658841940533, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.825870894161874e-05, |
|
"loss": 0.0, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 1.5492957746478875, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.8092948660788894e-05, |
|
"loss": 0.0, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 1.5524256651017214, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.79272093675105e-05, |
|
"loss": 0.0, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 1.5555555555555556, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.7761492885783734e-05, |
|
"loss": 0.0, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 1.5586854460093895, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.759580103935767e-05, |
|
"loss": 0.0, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 1.5618153364632237, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.7430135651710315e-05, |
|
"loss": 0.0, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 1.5649452269170578, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.7264498546028496e-05, |
|
"loss": 0.0, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.568075117370892, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.7098891545187755e-05, |
|
"loss": 0.0, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 1.5712050078247262, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.693331647173235e-05, |
|
"loss": 0.0, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 1.5743348982785603, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.6767775147855186e-05, |
|
"loss": 0.0, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 1.5774647887323945, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.660226939537767e-05, |
|
"loss": 0.0, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 1.5805946791862286, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.643680103572981e-05, |
|
"loss": 0.0, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 1.5837245696400626, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.627137188993009e-05, |
|
"loss": 0.0, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 1.5868544600938967, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.6105983778565406e-05, |
|
"loss": 0.0, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 1.5899843505477307, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.594063852177108e-05, |
|
"loss": 0.0, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 1.5931142410015648, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.577533793921083e-05, |
|
"loss": 0.0, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 1.596244131455399, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.5610083850056664e-05, |
|
"loss": 0.0, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.5993740219092332, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.5444878072968963e-05, |
|
"loss": 0.0, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 1.6025039123630673, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.527972242607642e-05, |
|
"loss": 0.0, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 1.6056338028169015, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.511461872695602e-05, |
|
"loss": 0.0, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 1.6087636932707357, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.4949568792613066e-05, |
|
"loss": 0.0, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 1.6118935837245696, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.478457443946117e-05, |
|
"loss": 0.0, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 1.6150234741784038, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.461963748330221e-05, |
|
"loss": 0.0, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 1.618153364632238, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.445475973930647e-05, |
|
"loss": 0.0, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 1.6212832550860719, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.428994302199254e-05, |
|
"loss": 0.0, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 1.624413145539906, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.412518914520741e-05, |
|
"loss": 0.0, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 1.6275430359937402, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.396049992210652e-05, |
|
"loss": 0.0, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.6306729264475743, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.379587716513372e-05, |
|
"loss": 0.0, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 1.6338028169014085, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.363132268600145e-05, |
|
"loss": 0.0, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 1.6369327073552427, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.34668382956707e-05, |
|
"loss": 0.0, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 1.6400625978090768, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.330242580433111e-05, |
|
"loss": 0.0, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 1.6431924882629108, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.313808702138109e-05, |
|
"loss": 0.0, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 1.646322378716745, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.297382375540784e-05, |
|
"loss": 0.0, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 1.649452269170579, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.2809637814167505e-05, |
|
"loss": 0.0, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 1.652582159624413, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.264553100456523e-05, |
|
"loss": 0.0, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 1.6557120500782472, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.248150513263528e-05, |
|
"loss": 0.0, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 1.6588419405320813, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.231756200352123e-05, |
|
"loss": 0.0, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.6619718309859155, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.215370342145601e-05, |
|
"loss": 0.0, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 1.6651017214397497, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.198993118974212e-05, |
|
"loss": 0.0, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 1.6682316118935838, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.1826247110731755e-05, |
|
"loss": 0.0, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 1.671361502347418, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.166265298580694e-05, |
|
"loss": 0.0, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 1.674491392801252, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.1499150615359726e-05, |
|
"loss": 0.0, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 1.677621283255086, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.133574179877243e-05, |
|
"loss": 0.0, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 1.6807511737089202, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.1172428334397754e-05, |
|
"loss": 0.0, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 1.6838810641627542, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.1009212019539044e-05, |
|
"loss": 0.0, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 1.6870109546165883, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.084609465043047e-05, |
|
"loss": 0.0, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 1.6901408450704225, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.068307802221734e-05, |
|
"loss": 0.0, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.6932707355242567, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.052016392893616e-05, |
|
"loss": 0.0, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 1.6964006259780908, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.035735416349515e-05, |
|
"loss": 0.0, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 1.699530516431925, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.0194650517654306e-05, |
|
"loss": 0.0, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 1.7026604068857591, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.003205478200576e-05, |
|
"loss": 0.0, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 1.705790297339593, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.986956874595409e-05, |
|
"loss": 0.0, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 1.7089201877934272, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.9707194197696584e-05, |
|
"loss": 0.0, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 1.7120500782472612, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.9544932924203594e-05, |
|
"loss": 0.0, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 1.7151799687010953, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.9382786711198814e-05, |
|
"loss": 0.0, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 1.7183098591549295, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.922075734313974e-05, |
|
"loss": 0.0, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 1.7214397496087637, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.905884660319792e-05, |
|
"loss": 0.0, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.7245696400625978, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.8897056273239354e-05, |
|
"loss": 0.0, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 1.727699530516432, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.8735388133804936e-05, |
|
"loss": 0.0, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 1.7308294209702662, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.857384396409083e-05, |
|
"loss": 0.0, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 1.7339593114241003, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.841242554192884e-05, |
|
"loss": 0.0, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 1.7370892018779343, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.825113464376693e-05, |
|
"loss": 0.0, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 1.7402190923317684, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.808997304464961e-05, |
|
"loss": 0.0, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 1.7433489827856024, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.7928942518198444e-05, |
|
"loss": 0.0, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 1.7464788732394365, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.7768044836592475e-05, |
|
"loss": 0.0, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 1.7496087636932707, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.7607281770548816e-05, |
|
"loss": 0.0, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 1.7527386541471048, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.744665508930303e-05, |
|
"loss": 0.0, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.755868544600939, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.728616656058977e-05, |
|
"loss": 0.0, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 1.7589984350547732, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.712581795062331e-05, |
|
"loss": 0.0, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 1.7621283255086073, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.696561102407805e-05, |
|
"loss": 0.0, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 1.7652582159624415, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.6805547544069144e-05, |
|
"loss": 0.0, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 1.7683881064162754, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.664562927213308e-05, |
|
"loss": 0.0, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 1.7715179968701096, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.648585796820833e-05, |
|
"loss": 0.0, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 1.7746478873239435, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.632623539061585e-05, |
|
"loss": 0.0, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 1.7777777777777777, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.616676329603995e-05, |
|
"loss": 0.0, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 1.7809076682316118, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.600744343950876e-05, |
|
"loss": 0.0, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 1.784037558685446, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.5848277574375024e-05, |
|
"loss": 0.0, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.7871674491392802, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.568926745229677e-05, |
|
"loss": 0.0, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 1.7902973395931143, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.553041482321805e-05, |
|
"loss": 0.0, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 1.7934272300469485, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.5371721435349615e-05, |
|
"loss": 0.0, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 1.7965571205007824, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.521318903514979e-05, |
|
"loss": 0.0, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 1.7996870109546166, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.505481936730516e-05, |
|
"loss": 0.0, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 1.8028169014084507, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.489661417471142e-05, |
|
"loss": 0.0, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 1.8059467918622847, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.473857519845415e-05, |
|
"loss": 0.0, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 1.8090766823161188, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.458070417778974e-05, |
|
"loss": 0.0, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 1.812206572769953, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.442300285012609e-05, |
|
"loss": 0.0, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 1.8153364632237872, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.4265472951003676e-05, |
|
"loss": 0.0, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.8184663536776213, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.410811621407633e-05, |
|
"loss": 0.0, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 1.8215962441314555, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.395093437109219e-05, |
|
"loss": 0.0, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 1.8247261345852896, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.3793929151874635e-05, |
|
"loss": 0.0, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 1.8278560250391236, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.363710228430329e-05, |
|
"loss": 0.0, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 1.8309859154929577, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.348045549429495e-05, |
|
"loss": 0.0, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 1.834115805946792, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.332399050578462e-05, |
|
"loss": 0.0, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 1.8372456964006258, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.3167709040706535e-05, |
|
"loss": 0.0, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 1.84037558685446, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.301161281897523e-05, |
|
"loss": 0.0, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 1.8435054773082942, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.285570355846657e-05, |
|
"loss": 0.0, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 1.8466353677621283, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.269998297499887e-05, |
|
"loss": 0.0, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.8497652582159625, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.254445278231405e-05, |
|
"loss": 0.0, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 1.8528951486697967, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.238911469205865e-05, |
|
"loss": 0.0, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 1.8560250391236308, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.223397041376515e-05, |
|
"loss": 0.0, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 1.8591549295774648, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.207902165483305e-05, |
|
"loss": 0.0, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 1.862284820031299, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.1924270120510135e-05, |
|
"loss": 0.0, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 1.8654147104851329, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.176971751387368e-05, |
|
"loss": 0.0, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 1.868544600938967, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.161536553581172e-05, |
|
"loss": 0.0, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 1.8716744913928012, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.1461215885004266e-05, |
|
"loss": 0.0, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 1.8748043818466353, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.1307270257904763e-05, |
|
"loss": 0.0, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 1.8779342723004695, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.1153530348721257e-05, |
|
"loss": 0.0, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.8810641627543037, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.099999784939784e-05, |
|
"loss": 0.0, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 1.8841940532081378, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.084667444959605e-05, |
|
"loss": 0.0, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 1.887323943661972, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.0693561836676174e-05, |
|
"loss": 0.0, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 1.890453834115806, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.0540661695678755e-05, |
|
"loss": 0.0, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 1.89358372456964, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.0387975709306043e-05, |
|
"loss": 0.0, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 1.896713615023474, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.0235505557903478e-05, |
|
"loss": 0.0, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 1.8998435054773082, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.0083252919441158e-05, |
|
"loss": 0.0, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 1.9029733959311423, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.993121946949542e-05, |
|
"loss": 0.0, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 1.9061032863849765, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.9779406881230383e-05, |
|
"loss": 0.0, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 1.9092331768388107, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.9627816825379497e-05, |
|
"loss": 0.0, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.9123630672926448, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.9476450970227233e-05, |
|
"loss": 0.0, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 1.915492957746479, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.9325310981590642e-05, |
|
"loss": 0.0, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 1.9186228482003131, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.917439852280108e-05, |
|
"loss": 0.0, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 1.921752738654147, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.9023715254685903e-05, |
|
"loss": 0.0, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 1.9248826291079812, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.8873262835550118e-05, |
|
"loss": 0.0, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 1.9280125195618152, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.872304292115828e-05, |
|
"loss": 0.0, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 1.9311424100156493, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.857305716471601e-05, |
|
"loss": 0.0, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 1.9342723004694835, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.8423307216852142e-05, |
|
"loss": 0.0, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 1.9374021909233177, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.8273794725600255e-05, |
|
"loss": 0.0, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 1.9405320813771518, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.812452133638075e-05, |
|
"loss": 0.0, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 1.943661971830986, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.7975488691982578e-05, |
|
"loss": 0.0, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 1.9467918622848202, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.7826698432545317e-05, |
|
"loss": 0.0, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 1.9499217527386543, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.767815219554094e-05, |
|
"loss": 0.0, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 1.9530516431924883, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.7529851615755993e-05, |
|
"loss": 0.0, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 1.9561815336463224, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.738179832527343e-05, |
|
"loss": 0.0, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 1.9593114241001564, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.7233993953454795e-05, |
|
"loss": 0.0, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 1.9624413145539905, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.7086440126922163e-05, |
|
"loss": 0.0, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 1.9655712050078247, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.693913846954036e-05, |
|
"loss": 0.0, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 1.9687010954616588, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.6792090602398966e-05, |
|
"loss": 0.0, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 1.971830985915493, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.664529814379457e-05, |
|
"loss": 0.0, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 1.9749608763693272, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.6498762709212956e-05, |
|
"loss": 0.0, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 1.9780907668231613, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.6352485911311253e-05, |
|
"loss": 0.0, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 1.9812206572769953, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.6206469359900236e-05, |
|
"loss": 0.0, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 1.9843505477308294, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.6060714661926654e-05, |
|
"loss": 0.0, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 1.9874804381846636, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.5915223421455448e-05, |
|
"loss": 0.0, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 1.9906103286384975, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.5769997239652137e-05, |
|
"loss": 0.0, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 1.9937402190923317, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.5625037714765277e-05, |
|
"loss": 0.0, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 1.9968701095461658, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.5480346442108714e-05, |
|
"loss": 0.0, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.5335925014044193e-05, |
|
"loss": 0.0, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_loss": NaN, |
|
"eval_runtime": 15.1515, |
|
"eval_samples_per_second": 35.574, |
|
"eval_steps_per_second": 8.91, |
|
"step": 639 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 957, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 1.6924437727779226e+17, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|