{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.055645722235103175,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009274287039183863,
"grad_norm": 1.411400556564331,
"learning_rate": 0.0,
"loss": 4.3229,
"step": 1
},
{
"epoch": 0.0018548574078367725,
"grad_norm": 2.266972064971924,
"learning_rate": 5e-06,
"loss": 4.4344,
"step": 2
},
{
"epoch": 0.0027822861117551586,
"grad_norm": 0.6068131923675537,
"learning_rate": 1e-05,
"loss": 3.2018,
"step": 3
},
{
"epoch": 0.003709714815673545,
"grad_norm": 0.9275912642478943,
"learning_rate": 1.5e-05,
"loss": 4.0208,
"step": 4
},
{
"epoch": 0.004637143519591932,
"grad_norm": 0.9753095507621765,
"learning_rate": 2e-05,
"loss": 3.7113,
"step": 5
},
{
"epoch": 0.005564572223510317,
"grad_norm": 2.6469643115997314,
"learning_rate": 2.5e-05,
"loss": 5.2115,
"step": 6
},
{
"epoch": 0.006492000927428704,
"grad_norm": 1.4554330110549927,
"learning_rate": 3e-05,
"loss": 4.2436,
"step": 7
},
{
"epoch": 0.00741942963134709,
"grad_norm": 0.5066649913787842,
"learning_rate": 3.5e-05,
"loss": 3.5398,
"step": 8
},
{
"epoch": 0.008346858335265477,
"grad_norm": 2.082305908203125,
"learning_rate": 4e-05,
"loss": 4.8127,
"step": 9
},
{
"epoch": 0.009274287039183864,
"grad_norm": 0.4055868983268738,
"learning_rate": 4.5e-05,
"loss": 3.3663,
"step": 10
},
{
"epoch": 0.010201715743102248,
"grad_norm": 0.363479882478714,
"learning_rate": 5e-05,
"loss": 3.2632,
"step": 11
},
{
"epoch": 0.011129144447020635,
"grad_norm": 1.7619837522506714,
"learning_rate": 4.9e-05,
"loss": 4.7703,
"step": 12
},
{
"epoch": 0.012056573150939021,
"grad_norm": 0.48136666417121887,
"learning_rate": 4.8e-05,
"loss": 3.1614,
"step": 13
},
{
"epoch": 0.012984001854857407,
"grad_norm": 0.48472875356674194,
"learning_rate": 4.7e-05,
"loss": 3.3447,
"step": 14
},
{
"epoch": 0.013911430558775794,
"grad_norm": 0.5840467214584351,
"learning_rate": 4.600000000000001e-05,
"loss": 3.3037,
"step": 15
},
{
"epoch": 0.01483885926269418,
"grad_norm": 1.9077085256576538,
"learning_rate": 4.5e-05,
"loss": 4.614,
"step": 16
},
{
"epoch": 0.015766287966612568,
"grad_norm": 0.5734739899635315,
"learning_rate": 4.4000000000000006e-05,
"loss": 3.1754,
"step": 17
},
{
"epoch": 0.016693716670530954,
"grad_norm": 1.8598120212554932,
"learning_rate": 4.3e-05,
"loss": 4.3064,
"step": 18
},
{
"epoch": 0.01762114537444934,
"grad_norm": 0.5264394283294678,
"learning_rate": 4.2e-05,
"loss": 3.2004,
"step": 19
},
{
"epoch": 0.018548574078367727,
"grad_norm": 0.6078647375106812,
"learning_rate": 4.1e-05,
"loss": 3.3305,
"step": 20
},
{
"epoch": 0.01947600278228611,
"grad_norm": 0.49842751026153564,
"learning_rate": 4e-05,
"loss": 3.4103,
"step": 21
},
{
"epoch": 0.020403431486204496,
"grad_norm": 0.6825811862945557,
"learning_rate": 3.9000000000000006e-05,
"loss": 3.4553,
"step": 22
},
{
"epoch": 0.021330860190122883,
"grad_norm": 1.0185341835021973,
"learning_rate": 3.8e-05,
"loss": 3.6376,
"step": 23
},
{
"epoch": 0.02225828889404127,
"grad_norm": 0.3887212574481964,
"learning_rate": 3.7e-05,
"loss": 2.9089,
"step": 24
},
{
"epoch": 0.023185717597959656,
"grad_norm": 1.5926904678344727,
"learning_rate": 3.6e-05,
"loss": 3.6112,
"step": 25
},
{
"epoch": 0.024113146301878042,
"grad_norm": 1.4020466804504395,
"learning_rate": 3.5e-05,
"loss": 3.8137,
"step": 26
},
{
"epoch": 0.025040575005796428,
"grad_norm": 0.8697665929794312,
"learning_rate": 3.4000000000000007e-05,
"loss": 3.3651,
"step": 27
},
{
"epoch": 0.025968003709714815,
"grad_norm": 1.1937010288238525,
"learning_rate": 3.3e-05,
"loss": 3.7935,
"step": 28
},
{
"epoch": 0.0268954324136332,
"grad_norm": 0.583304762840271,
"learning_rate": 3.2000000000000005e-05,
"loss": 3.279,
"step": 29
},
{
"epoch": 0.027822861117551587,
"grad_norm": 2.1346917152404785,
"learning_rate": 3.1e-05,
"loss": 4.1021,
"step": 30
},
{
"epoch": 0.028750289821469974,
"grad_norm": 0.7281085848808289,
"learning_rate": 3e-05,
"loss": 3.3558,
"step": 31
},
{
"epoch": 0.02967771852538836,
"grad_norm": 1.7035914659500122,
"learning_rate": 2.9e-05,
"loss": 3.5765,
"step": 32
},
{
"epoch": 0.030605147229306746,
"grad_norm": 1.3239504098892212,
"learning_rate": 2.8000000000000003e-05,
"loss": 3.6181,
"step": 33
},
{
"epoch": 0.031532575933225136,
"grad_norm": 0.9565314650535583,
"learning_rate": 2.7000000000000002e-05,
"loss": 3.5135,
"step": 34
},
{
"epoch": 0.03246000463714352,
"grad_norm": 1.0347371101379395,
"learning_rate": 2.6000000000000002e-05,
"loss": 3.2434,
"step": 35
},
{
"epoch": 0.03338743334106191,
"grad_norm": 0.54816734790802,
"learning_rate": 2.5e-05,
"loss": 3.1409,
"step": 36
},
{
"epoch": 0.03431486204498029,
"grad_norm": 0.4945932626724243,
"learning_rate": 2.4e-05,
"loss": 2.9236,
"step": 37
},
{
"epoch": 0.03524229074889868,
"grad_norm": 0.48317691683769226,
"learning_rate": 2.3000000000000003e-05,
"loss": 3.3575,
"step": 38
},
{
"epoch": 0.036169719452817065,
"grad_norm": 1.2033950090408325,
"learning_rate": 2.2000000000000003e-05,
"loss": 3.6258,
"step": 39
},
{
"epoch": 0.037097148156735454,
"grad_norm": 1.137432336807251,
"learning_rate": 2.1e-05,
"loss": 3.5354,
"step": 40
},
{
"epoch": 0.03802457686065384,
"grad_norm": 1.494925856590271,
"learning_rate": 2e-05,
"loss": 3.6365,
"step": 41
},
{
"epoch": 0.03895200556457222,
"grad_norm": 1.1683684587478638,
"learning_rate": 1.9e-05,
"loss": 3.7584,
"step": 42
},
{
"epoch": 0.03987943426849061,
"grad_norm": 1.5439304113388062,
"learning_rate": 1.8e-05,
"loss": 3.7813,
"step": 43
},
{
"epoch": 0.04080686297240899,
"grad_norm": 1.4335922002792358,
"learning_rate": 1.7000000000000003e-05,
"loss": 3.5158,
"step": 44
},
{
"epoch": 0.04173429167632738,
"grad_norm": 1.0030946731567383,
"learning_rate": 1.6000000000000003e-05,
"loss": 3.4701,
"step": 45
},
{
"epoch": 0.042661720380245766,
"grad_norm": 0.8070414662361145,
"learning_rate": 1.5e-05,
"loss": 3.259,
"step": 46
},
{
"epoch": 0.043589149084164155,
"grad_norm": 1.9914580583572388,
"learning_rate": 1.4000000000000001e-05,
"loss": 4.3585,
"step": 47
},
{
"epoch": 0.04451657778808254,
"grad_norm": 0.6409067511558533,
"learning_rate": 1.3000000000000001e-05,
"loss": 3.3393,
"step": 48
},
{
"epoch": 0.04544400649200093,
"grad_norm": 0.910348117351532,
"learning_rate": 1.2e-05,
"loss": 3.4981,
"step": 49
},
{
"epoch": 0.04637143519591931,
"grad_norm": 1.3223044872283936,
"learning_rate": 1.1000000000000001e-05,
"loss": 3.6996,
"step": 50
},
{
"epoch": 0.0472988638998377,
"grad_norm": 0.3819347620010376,
"learning_rate": 1e-05,
"loss": 2.9741,
"step": 51
},
{
"epoch": 0.048226292603756084,
"grad_norm": 1.3223705291748047,
"learning_rate": 9e-06,
"loss": 3.7789,
"step": 52
},
{
"epoch": 0.049153721307674474,
"grad_norm": 0.6920037865638733,
"learning_rate": 8.000000000000001e-06,
"loss": 3.1836,
"step": 53
},
{
"epoch": 0.050081150011592857,
"grad_norm": 0.6605297923088074,
"learning_rate": 7.000000000000001e-06,
"loss": 2.998,
"step": 54
},
{
"epoch": 0.051008578715511246,
"grad_norm": 0.8535223603248596,
"learning_rate": 6e-06,
"loss": 3.1944,
"step": 55
},
{
"epoch": 0.05193600741942963,
"grad_norm": 1.4277936220169067,
"learning_rate": 5e-06,
"loss": 3.6891,
"step": 56
},
{
"epoch": 0.05286343612334802,
"grad_norm": 3.182830572128296,
"learning_rate": 4.000000000000001e-06,
"loss": 5.1859,
"step": 57
},
{
"epoch": 0.0537908648272664,
"grad_norm": 0.6000503301620483,
"learning_rate": 3e-06,
"loss": 3.3717,
"step": 58
},
{
"epoch": 0.05471829353118479,
"grad_norm": 1.301562786102295,
"learning_rate": 2.0000000000000003e-06,
"loss": 3.4226,
"step": 59
},
{
"epoch": 0.055645722235103175,
"grad_norm": 0.7561865448951721,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.0751,
"step": 60
}
],
"logging_steps": 1,
"max_steps": 60,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 951810432098304.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}