{
"best_global_step": 4870,
"best_metric": 1.0858850479125977,
"best_model_checkpoint": "/mnt/tank/scratch/rgurtsiev/workflow/STAGE1-DEMO/checkpoint-4870",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 4870,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1026694045174538,
"grad_norm": 1.1697688102722168,
"learning_rate": 1.3415468856947298e-06,
"loss": 3.6536,
"step": 50
},
{
"epoch": 0.2053388090349076,
"grad_norm": 1.3012523651123047,
"learning_rate": 2.7104722792607808e-06,
"loss": 3.6247,
"step": 100
},
{
"epoch": 0.3080082135523614,
"grad_norm": 1.3252615928649902,
"learning_rate": 4.0793976728268315e-06,
"loss": 3.5833,
"step": 150
},
{
"epoch": 0.4106776180698152,
"grad_norm": 1.2821773290634155,
"learning_rate": 5.448323066392882e-06,
"loss": 3.471,
"step": 200
},
{
"epoch": 0.5133470225872689,
"grad_norm": 1.5595322847366333,
"learning_rate": 6.817248459958933e-06,
"loss": 3.2428,
"step": 250
},
{
"epoch": 0.6160164271047228,
"grad_norm": 1.5284234285354614,
"learning_rate": 8.186173853524983e-06,
"loss": 2.9622,
"step": 300
},
{
"epoch": 0.7186858316221766,
"grad_norm": 1.7251927852630615,
"learning_rate": 9.555099247091034e-06,
"loss": 2.536,
"step": 350
},
{
"epoch": 0.8213552361396304,
"grad_norm": 1.3935151100158691,
"learning_rate": 1.0924024640657084e-05,
"loss": 2.0349,
"step": 400
},
{
"epoch": 0.9240246406570842,
"grad_norm": 0.7713799476623535,
"learning_rate": 1.2292950034223137e-05,
"loss": 1.7696,
"step": 450
},
{
"epoch": 1.0,
"eval_loss": 1.657446265220642,
"eval_runtime": 396.4486,
"eval_samples_per_second": 4.911,
"eval_steps_per_second": 0.615,
"step": 487
},
{
"epoch": 1.0266940451745379,
"grad_norm": 0.8125890493392944,
"learning_rate": 1.3661875427789186e-05,
"loss": 1.6644,
"step": 500
},
{
"epoch": 1.1293634496919918,
"grad_norm": 0.7177293300628662,
"learning_rate": 1.5030800821355238e-05,
"loss": 1.5654,
"step": 550
},
{
"epoch": 1.2320328542094456,
"grad_norm": 0.9869002103805542,
"learning_rate": 1.6399726214921288e-05,
"loss": 1.543,
"step": 600
},
{
"epoch": 1.3347022587268995,
"grad_norm": 1.156468152999878,
"learning_rate": 1.776865160848734e-05,
"loss": 1.5008,
"step": 650
},
{
"epoch": 1.4373716632443532,
"grad_norm": 0.9758607149124146,
"learning_rate": 1.913757700205339e-05,
"loss": 1.4734,
"step": 700
},
{
"epoch": 1.5400410677618068,
"grad_norm": 1.0511637926101685,
"learning_rate": 2.050650239561944e-05,
"loss": 1.4343,
"step": 750
},
{
"epoch": 1.6427104722792607,
"grad_norm": 1.1518610715866089,
"learning_rate": 2.187542778918549e-05,
"loss": 1.4289,
"step": 800
},
{
"epoch": 1.7453798767967146,
"grad_norm": 1.1441558599472046,
"learning_rate": 2.324435318275154e-05,
"loss": 1.3902,
"step": 850
},
{
"epoch": 1.8480492813141685,
"grad_norm": 1.3006062507629395,
"learning_rate": 2.4613278576317596e-05,
"loss": 1.3834,
"step": 900
},
{
"epoch": 1.9507186858316223,
"grad_norm": 1.1244959831237793,
"learning_rate": 2.5982203969883643e-05,
"loss": 1.3556,
"step": 950
},
{
"epoch": 2.0,
"eval_loss": 1.3616195917129517,
"eval_runtime": 396.64,
"eval_samples_per_second": 4.909,
"eval_steps_per_second": 0.615,
"step": 974
},
{
"epoch": 2.0533880903490758,
"grad_norm": 1.4575847387313843,
"learning_rate": 2.7351129363449694e-05,
"loss": 1.3388,
"step": 1000
},
{
"epoch": 2.1560574948665296,
"grad_norm": 1.3578325510025024,
"learning_rate": 2.8720054757015744e-05,
"loss": 1.3067,
"step": 1050
},
{
"epoch": 2.2587268993839835,
"grad_norm": 1.9210494756698608,
"learning_rate": 3.0088980150581795e-05,
"loss": 1.3183,
"step": 1100
},
{
"epoch": 2.3613963039014374,
"grad_norm": 2.051852226257324,
"learning_rate": 3.1457905544147846e-05,
"loss": 1.274,
"step": 1150
},
{
"epoch": 2.4640657084188913,
"grad_norm": 1.660125970840454,
"learning_rate": 3.282683093771389e-05,
"loss": 1.2661,
"step": 1200
},
{
"epoch": 2.566735112936345,
"grad_norm": 4.411022663116455,
"learning_rate": 3.419575633127995e-05,
"loss": 1.2339,
"step": 1250
},
{
"epoch": 2.669404517453799,
"grad_norm": 1.519783854484558,
"learning_rate": 3.5564681724846e-05,
"loss": 1.2286,
"step": 1300
},
{
"epoch": 2.7720739219712525,
"grad_norm": 1.7709407806396484,
"learning_rate": 3.693360711841205e-05,
"loss": 1.219,
"step": 1350
},
{
"epoch": 2.8747433264887063,
"grad_norm": 1.8909534215927124,
"learning_rate": 3.8302532511978096e-05,
"loss": 1.2072,
"step": 1400
},
{
"epoch": 2.97741273100616,
"grad_norm": 1.4890103340148926,
"learning_rate": 3.967145790554415e-05,
"loss": 1.1951,
"step": 1450
},
{
"epoch": 3.0,
"eval_loss": 1.2139294147491455,
"eval_runtime": 396.2906,
"eval_samples_per_second": 4.913,
"eval_steps_per_second": 0.616,
"step": 1461
},
{
"epoch": 3.080082135523614,
"grad_norm": 2.104808807373047,
"learning_rate": 3.998773779555414e-05,
"loss": 1.2078,
"step": 1500
},
{
"epoch": 3.1827515400410675,
"grad_norm": 1.3491119146347046,
"learning_rate": 3.993426857404775e-05,
"loss": 1.1717,
"step": 1550
},
{
"epoch": 3.2854209445585214,
"grad_norm": 1.7482995986938477,
"learning_rate": 3.983848290367661e-05,
"loss": 1.1757,
"step": 1600
},
{
"epoch": 3.3880903490759753,
"grad_norm": 1.8251243829727173,
"learning_rate": 3.9700584118182686e-05,
"loss": 1.1688,
"step": 1650
},
{
"epoch": 3.490759753593429,
"grad_norm": 1.9155806303024292,
"learning_rate": 3.9520864948993926e-05,
"loss": 1.1742,
"step": 1700
},
{
"epoch": 3.593429158110883,
"grad_norm": 1.8360294103622437,
"learning_rate": 3.929970690381425e-05,
"loss": 1.1474,
"step": 1750
},
{
"epoch": 3.696098562628337,
"grad_norm": 1.893100380897522,
"learning_rate": 3.903757945675931e-05,
"loss": 1.1596,
"step": 1800
},
{
"epoch": 3.798767967145791,
"grad_norm": 1.7409580945968628,
"learning_rate": 3.8735039051757204e-05,
"loss": 1.1496,
"step": 1850
},
{
"epoch": 3.9014373716632442,
"grad_norm": 1.4829061031341553,
"learning_rate": 3.83927279213298e-05,
"loss": 1.1262,
"step": 1900
},
{
"epoch": 4.0,
"eval_loss": 1.1556336879730225,
"eval_runtime": 396.6647,
"eval_samples_per_second": 4.908,
"eval_steps_per_second": 0.615,
"step": 1948
},
{
"epoch": 4.0041067761806985,
"grad_norm": 1.6514705419540405,
"learning_rate": 3.801137272326205e-05,
"loss": 1.1419,
"step": 1950
},
{
"epoch": 4.1067761806981515,
"grad_norm": 1.7151414155960083,
"learning_rate": 3.759178299805344e-05,
"loss": 1.1441,
"step": 2000
},
{
"epoch": 4.209445585215605,
"grad_norm": 1.5632063150405884,
"learning_rate": 3.713484945042608e-05,
"loss": 1.1271,
"step": 2050
},
{
"epoch": 4.312114989733059,
"grad_norm": 1.619882583618164,
"learning_rate": 3.664154205853749e-05,
"loss": 1.1016,
"step": 2100
},
{
"epoch": 4.414784394250513,
"grad_norm": 1.6811403036117554,
"learning_rate": 3.6112908014911744e-05,
"loss": 1.1203,
"step": 2150
},
{
"epoch": 4.517453798767967,
"grad_norm": 1.52841055393219,
"learning_rate": 3.555006950346002e-05,
"loss": 1.1152,
"step": 2200
},
{
"epoch": 4.620123203285421,
"grad_norm": 1.482682704925537,
"learning_rate": 3.4954221317309576e-05,
"loss": 1.1063,
"step": 2250
},
{
"epoch": 4.722792607802875,
"grad_norm": 1.508597731590271,
"learning_rate": 3.4326628322497834e-05,
"loss": 1.1095,
"step": 2300
},
{
"epoch": 4.825462012320329,
"grad_norm": 1.5862213373184204,
"learning_rate": 3.366862277291585e-05,
"loss": 1.1062,
"step": 2350
},
{
"epoch": 4.9281314168377826,
"grad_norm": 1.6203546524047852,
"learning_rate": 3.298160148220079e-05,
"loss": 1.1189,
"step": 2400
},
{
"epoch": 5.0,
"eval_loss": 1.1254490613937378,
"eval_runtime": 395.8276,
"eval_samples_per_second": 4.919,
"eval_steps_per_second": 0.616,
"step": 2435
},
{
"epoch": 5.030800821355236,
"grad_norm": 1.5772205591201782,
"learning_rate": 3.2267022858581186e-05,
"loss": 1.114,
"step": 2450
},
{
"epoch": 5.13347022587269,
"grad_norm": 1.4345210790634155,
"learning_rate": 3.152640380896904e-05,
"loss": 1.0856,
"step": 2500
},
{
"epoch": 5.236139630390143,
"grad_norm": 1.4504789113998413,
"learning_rate": 3.0761316518871184e-05,
"loss": 1.0866,
"step": 2550
},
{
"epoch": 5.338809034907597,
"grad_norm": 1.841271996498108,
"learning_rate": 2.997338511495513e-05,
"loss": 1.0819,
"step": 2600
},
{
"epoch": 5.441478439425051,
"grad_norm": 1.3801902532577515,
"learning_rate": 2.916428221735433e-05,
"loss": 1.1059,
"step": 2650
},
{
"epoch": 5.544147843942505,
"grad_norm": 1.5078575611114502,
"learning_rate": 2.8335725389031572e-05,
"loss": 1.0974,
"step": 2700
},
{
"epoch": 5.646817248459959,
"grad_norm": 1.6412862539291382,
"learning_rate": 2.7489473489737663e-05,
"loss": 1.1025,
"step": 2750
},
{
"epoch": 5.749486652977413,
"grad_norm": 1.6640971899032593,
"learning_rate": 2.6627322942305504e-05,
"loss": 1.0769,
"step": 2800
},
{
"epoch": 5.852156057494867,
"grad_norm": 1.8713879585266113,
"learning_rate": 2.57511039192051e-05,
"loss": 1.0739,
"step": 2850
},
{
"epoch": 5.95482546201232,
"grad_norm": 1.477726936340332,
"learning_rate": 2.4862676457455003e-05,
"loss": 1.0914,
"step": 2900
},
{
"epoch": 6.0,
"eval_loss": 1.1069694757461548,
"eval_runtime": 396.2551,
"eval_samples_per_second": 4.914,
"eval_steps_per_second": 0.616,
"step": 2922
},
{
"epoch": 6.057494866529774,
"grad_norm": 1.5570802688598633,
"learning_rate": 2.396392651013723e-05,
"loss": 1.0646,
"step": 2950
},
{
"epoch": 6.160164271047228,
"grad_norm": 1.5152437686920166,
"learning_rate": 2.3056761942897655e-05,
"loss": 1.0817,
"step": 3000
},
{
"epoch": 6.262833675564682,
"grad_norm": 1.4843331575393677,
"learning_rate": 2.2143108483930357e-05,
"loss": 1.0762,
"step": 3050
},
{
"epoch": 6.365503080082135,
"grad_norm": 1.504683256149292,
"learning_rate": 2.122490563604342e-05,
"loss": 1.0669,
"step": 3100
},
{
"epoch": 6.468172484599589,
"grad_norm": 1.5838227272033691,
"learning_rate": 2.0304102559483984e-05,
"loss": 1.0597,
"step": 3150
},
{
"epoch": 6.570841889117043,
"grad_norm": 1.61528480052948,
"learning_rate": 1.9382653934262424e-05,
"loss": 1.0839,
"step": 3200
},
{
"epoch": 6.673511293634497,
"grad_norm": 1.4133933782577515,
"learning_rate": 1.8462515810759347e-05,
"loss": 1.0798,
"step": 3250
},
{
"epoch": 6.776180698151951,
"grad_norm": 1.4809049367904663,
"learning_rate": 1.7545641457423334e-05,
"loss": 1.0666,
"step": 3300
},
{
"epoch": 6.878850102669404,
"grad_norm": 1.6875197887420654,
"learning_rate": 1.6633977214374384e-05,
"loss": 1.0671,
"step": 3350
},
{
"epoch": 6.981519507186858,
"grad_norm": 1.5088533163070679,
"learning_rate": 1.5729458361714657e-05,
"loss": 1.0756,
"step": 3400
},
{
"epoch": 7.0,
"eval_loss": 1.0947245359420776,
"eval_runtime": 396.3527,
"eval_samples_per_second": 4.912,
"eval_steps_per_second": 0.616,
"step": 3409
},
{
"epoch": 7.084188911704312,
"grad_norm": 1.9185519218444824,
"learning_rate": 1.4834005011317545e-05,
"loss": 1.055,
"step": 3450
},
{
"epoch": 7.186858316221766,
"grad_norm": 1.6549229621887207,
"learning_rate": 1.3949518030815667e-05,
"loss": 1.0565,
"step": 3500
},
{
"epoch": 7.28952772073922,
"grad_norm": 1.5964972972869873,
"learning_rate": 1.3077875008440714e-05,
"loss": 1.0696,
"step": 3550
},
{
"epoch": 7.392197125256674,
"grad_norm": 1.5329166650772095,
"learning_rate": 1.2220926267280677e-05,
"loss": 1.0657,
"step": 3600
},
{
"epoch": 7.494866529774128,
"grad_norm": 1.6698694229125977,
"learning_rate": 1.1380490937415516e-05,
"loss": 1.0596,
"step": 3650
},
{
"epoch": 7.597535934291582,
"grad_norm": 1.5411231517791748,
"learning_rate": 1.0558353094269314e-05,
"loss": 1.059,
"step": 3700
},
{
"epoch": 7.700205338809035,
"grad_norm": 1.7882490158081055,
"learning_rate": 9.756257971376489e-06,
"loss": 1.0731,
"step": 3750
},
{
"epoch": 7.8028747433264884,
"grad_norm": 1.5504204034805298,
"learning_rate": 8.975908255601452e-06,
"loss": 1.0565,
"step": 3800
},
{
"epoch": 7.905544147843942,
"grad_norm": 1.4786473512649536,
"learning_rate": 8.21896047267632e-06,
"loss": 1.0502,
"step": 3850
},
{
"epoch": 8.0,
"eval_loss": 1.0888762474060059,
"eval_runtime": 396.8423,
"eval_samples_per_second": 4.906,
"eval_steps_per_second": 0.615,
"step": 3896
},
{
"epoch": 8.008213552361397,
"grad_norm": 1.6214848756790161,
"learning_rate": 7.487021470729523e-06,
"loss": 1.0634,
"step": 3900
},
{
"epoch": 8.11088295687885,
"grad_norm": 1.712120771408081,
"learning_rate": 6.781645009269846e-06,
"loss": 1.062,
"step": 3950
},
{
"epoch": 8.213552361396303,
"grad_norm": 1.5750735998153687,
"learning_rate": 6.1043284608671e-06,
"loss": 1.0387,
"step": 4000
},
{
"epoch": 8.316221765913758,
"grad_norm": 1.5793296098709106,
"learning_rate": 5.45650963253082e-06,
"loss": 1.0594,
"step": 4050
},
{
"epoch": 8.41889117043121,
"grad_norm": 1.445952296257019,
"learning_rate": 4.839563713534743e-06,
"loss": 1.0614,
"step": 4100
},
{
"epoch": 8.521560574948666,
"grad_norm": 1.731099009513855,
"learning_rate": 4.254800356166155e-06,
"loss": 1.0599,
"step": 4150
},
{
"epoch": 8.624229979466119,
"grad_norm": 1.7094162702560425,
"learning_rate": 3.7034608955971175e-06,
"loss": 1.0437,
"step": 4200
},
{
"epoch": 8.726899383983573,
"grad_norm": 1.6651116609573364,
"learning_rate": 3.1867157147791585e-06,
"loss": 1.0667,
"step": 4250
},
{
"epoch": 8.829568788501026,
"grad_norm": 1.5388877391815186,
"learning_rate": 2.705661759955318e-06,
"loss": 1.0529,
"step": 4300
},
{
"epoch": 8.932238193018481,
"grad_norm": 1.7173577547073364,
"learning_rate": 2.261320212063589e-06,
"loss": 1.0619,
"step": 4350
},
{
"epoch": 9.0,
"eval_loss": 1.0863490104675293,
"eval_runtime": 395.7285,
"eval_samples_per_second": 4.92,
"eval_steps_per_second": 0.617,
"step": 4383
},
{
"epoch": 9.034907597535934,
"grad_norm": 1.79407799243927,
"learning_rate": 1.854634318974835e-06,
"loss": 1.0494,
"step": 4400
},
{
"epoch": 9.137577002053389,
"grad_norm": 1.6543513536453247,
"learning_rate": 1.486467393167017e-06,
"loss": 1.0497,
"step": 4450
},
{
"epoch": 9.240246406570842,
"grad_norm": 1.5759410858154297,
"learning_rate": 1.1576009790861598e-06,
"loss": 1.0503,
"step": 4500
},
{
"epoch": 9.342915811088295,
"grad_norm": 1.4627271890640259,
"learning_rate": 8.687331940844545e-07,
"loss": 1.0585,
"step": 4550
},
{
"epoch": 9.44558521560575,
"grad_norm": 1.7757667303085327,
"learning_rate": 6.204772464573161e-07,
"loss": 1.0468,
"step": 4600
},
{
"epoch": 9.548254620123203,
"grad_norm": 1.7230888605117798,
"learning_rate": 4.1336013372533125e-07,
"loss": 1.0485,
"step": 4650
},
{
"epoch": 9.650924024640657,
"grad_norm": 1.7910829782485962,
"learning_rate": 2.478215239243409e-07,
"loss": 1.0498,
"step": 4700
},
{
"epoch": 9.75359342915811,
"grad_norm": 1.7358592748641968,
"learning_rate": 1.2421282227850662e-07,
"loss": 1.0548,
"step": 4750
},
{
"epoch": 9.856262833675565,
"grad_norm": 1.603044867515564,
"learning_rate": 4.2796425237592306e-08,
"loss": 1.0589,
"step": 4800
},
{
"epoch": 9.958932238193018,
"grad_norm": 1.4900743961334229,
"learning_rate": 3.745163461994494e-09,
"loss": 1.056,
"step": 4850
},
{
"epoch": 10.0,
"eval_loss": 1.0858850479125977,
"eval_runtime": 396.092,
"eval_samples_per_second": 4.916,
"eval_steps_per_second": 0.616,
"step": 4870
}
],
"logging_steps": 50,
"max_steps": 4870,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.27791813148672e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}