Satori-SFT-7B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9957356076759063,
"eval_steps": 1000,
"global_step": 468,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.042643923240938165,
"grad_norm": 3.179123776279851,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.432,
"step": 10
},
{
"epoch": 0.08528784648187633,
"grad_norm": 2.0834838340230113,
"learning_rate": 1.999398868012663e-05,
"loss": 0.266,
"step": 20
},
{
"epoch": 0.1279317697228145,
"grad_norm": 1.839885995199569,
"learning_rate": 1.9945941475610623e-05,
"loss": 0.189,
"step": 30
},
{
"epoch": 0.17057569296375266,
"grad_norm": 0.3374452319668954,
"learning_rate": 1.9850078058821615e-05,
"loss": 0.1405,
"step": 40
},
{
"epoch": 0.21321961620469082,
"grad_norm": 0.35572662705742497,
"learning_rate": 1.970685930372489e-05,
"loss": 0.1352,
"step": 50
},
{
"epoch": 0.255863539445629,
"grad_norm": 0.34732501425850515,
"learning_rate": 1.951697375030553e-05,
"loss": 0.1311,
"step": 60
},
{
"epoch": 0.29850746268656714,
"grad_norm": 0.29120729702844944,
"learning_rate": 1.9281334294336364e-05,
"loss": 0.1211,
"step": 70
},
{
"epoch": 0.3411513859275053,
"grad_norm": 0.2548677782780123,
"learning_rate": 1.90010737985307e-05,
"loss": 0.1204,
"step": 80
},
{
"epoch": 0.3837953091684435,
"grad_norm": 0.23124279128247788,
"learning_rate": 1.8677539646179706e-05,
"loss": 0.1216,
"step": 90
},
{
"epoch": 0.42643923240938164,
"grad_norm": 0.2233543638243816,
"learning_rate": 1.831228726345841e-05,
"loss": 0.1187,
"step": 100
},
{
"epoch": 0.4690831556503198,
"grad_norm": 0.21228773844702403,
"learning_rate": 1.7907072641542527e-05,
"loss": 0.1147,
"step": 110
},
{
"epoch": 0.511727078891258,
"grad_norm": 0.21240166483768347,
"learning_rate": 1.746384389448694e-05,
"loss": 0.1123,
"step": 120
},
{
"epoch": 0.5543710021321961,
"grad_norm": 0.22092994006720384,
"learning_rate": 1.6984731893452174e-05,
"loss": 0.1122,
"step": 130
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.2146266283518355,
"learning_rate": 1.647204002230594e-05,
"loss": 0.1104,
"step": 140
},
{
"epoch": 0.6396588486140725,
"grad_norm": 0.20918171426009047,
"learning_rate": 1.592823310385073e-05,
"loss": 0.1112,
"step": 150
},
{
"epoch": 0.6823027718550106,
"grad_norm": 0.1967909221104072,
"learning_rate": 1.5355925549915943e-05,
"loss": 0.1124,
"step": 160
},
{
"epoch": 0.7249466950959488,
"grad_norm": 0.2240605099251795,
"learning_rate": 1.4757868792284231e-05,
"loss": 0.1137,
"step": 170
},
{
"epoch": 0.767590618336887,
"grad_norm": 0.20377236444556038,
"learning_rate": 1.4136938054879284e-05,
"loss": 0.1097,
"step": 180
},
{
"epoch": 0.8102345415778252,
"grad_norm": 0.2106109081511342,
"learning_rate": 1.3496118530809195e-05,
"loss": 0.112,
"step": 190
},
{
"epoch": 0.8528784648187633,
"grad_norm": 0.18132699922626352,
"learning_rate": 1.2838491030720882e-05,
"loss": 0.1079,
"step": 200
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.1926837140760371,
"learning_rate": 1.2167217171462566e-05,
"loss": 0.1087,
"step": 210
},
{
"epoch": 0.9381663113006397,
"grad_norm": 0.19627892114205173,
"learning_rate": 1.148552417626157e-05,
"loss": 0.1115,
"step": 220
},
{
"epoch": 0.9808102345415778,
"grad_norm": 0.19029596670392074,
"learning_rate": 1.0796689359492154e-05,
"loss": 0.108,
"step": 230
},
{
"epoch": 1.023454157782516,
"grad_norm": 0.18044861833183792,
"learning_rate": 1.0104024370624644e-05,
"loss": 0.0849,
"step": 240
},
{
"epoch": 1.0660980810234542,
"grad_norm": 0.19142738031135273,
"learning_rate": 9.410859273104823e-06,
"loss": 0.0661,
"step": 250
},
{
"epoch": 1.1087420042643923,
"grad_norm": 0.17150102289558444,
"learning_rate": 8.720526534706052e-06,
"loss": 0.0642,
"step": 260
},
{
"epoch": 1.1513859275053304,
"grad_norm": 0.2442559352462789,
"learning_rate": 8.036345006322358e-06,
"loss": 0.0645,
"step": 270
},
{
"epoch": 1.1940298507462686,
"grad_norm": 0.17495312344390224,
"learning_rate": 7.361603966226165e-06,
"loss": 0.0621,
"step": 280
},
{
"epoch": 1.236673773987207,
"grad_norm": 0.16258359858243326,
"learning_rate": 6.6995473064996455e-06,
"loss": 0.0595,
"step": 290
},
{
"epoch": 1.279317697228145,
"grad_norm": 0.17596476037955336,
"learning_rate": 6.053357937665237e-06,
"loss": 0.0622,
"step": 300
},
{
"epoch": 1.3219616204690832,
"grad_norm": 0.158017144205196,
"learning_rate": 5.4261424864917075e-06,
"loss": 0.0608,
"step": 310
},
{
"epoch": 1.3646055437100213,
"grad_norm": 0.17821374662840944,
"learning_rate": 4.82091636054281e-06,
"loss": 0.061,
"step": 320
},
{
"epoch": 1.4072494669509594,
"grad_norm": 0.1909778433139327,
"learning_rate": 4.240589251272342e-06,
"loss": 0.0605,
"step": 330
},
{
"epoch": 1.4498933901918978,
"grad_norm": 0.18171746562096064,
"learning_rate": 3.687951145361073e-06,
"loss": 0.0631,
"step": 340
},
{
"epoch": 1.4925373134328357,
"grad_norm": 0.17460796522473127,
"learning_rate": 3.165658911547592e-06,
"loss": 0.06,
"step": 350
},
{
"epoch": 1.535181236673774,
"grad_norm": 0.1810617961103566,
"learning_rate": 2.6762235274383775e-06,
"loss": 0.0612,
"step": 360
},
{
"epoch": 1.5778251599147122,
"grad_norm": 0.15880550118378636,
"learning_rate": 2.2219980077055756e-06,
"loss": 0.0616,
"step": 370
},
{
"epoch": 1.6204690831556503,
"grad_norm": 0.17610380896685834,
"learning_rate": 1.8051660917090718e-06,
"loss": 0.0593,
"step": 380
},
{
"epoch": 1.6631130063965884,
"grad_norm": 0.1852362138493889,
"learning_rate": 1.4277317449282834e-06,
"loss": 0.0608,
"step": 390
},
{
"epoch": 1.7057569296375266,
"grad_norm": 0.18397811408680578,
"learning_rate": 1.0915095246767692e-06,
"loss": 0.0603,
"step": 400
},
{
"epoch": 1.748400852878465,
"grad_norm": 0.18777942289827937,
"learning_rate": 7.981158564175074e-07,
"loss": 0.0607,
"step": 410
},
{
"epoch": 1.7910447761194028,
"grad_norm": 0.1785060533694166,
"learning_rate": 5.489612626189245e-07,
"loss": 0.0569,
"step": 420
},
{
"epoch": 1.8336886993603412,
"grad_norm": 0.17603977808158508,
"learning_rate": 3.452435815123323e-07,
"loss": 0.0604,
"step": 430
},
{
"epoch": 1.8763326226012793,
"grad_norm": 0.18882098153064591,
"learning_rate": 1.8794220835231413e-07,
"loss": 0.0609,
"step": 440
},
{
"epoch": 1.9189765458422174,
"grad_norm": 0.1838499771403163,
"learning_rate": 7.781338686584928e-08,
"loss": 0.0597,
"step": 450
},
{
"epoch": 1.9616204690831558,
"grad_norm": 0.17454958184966676,
"learning_rate": 1.5386573527067516e-08,
"loss": 0.0618,
"step": 460
},
{
"epoch": 1.9957356076759063,
"step": 468,
"total_flos": 140358569164800.0,
"train_loss": 0.10031582892705233,
"train_runtime": 1756.1076,
"train_samples_per_second": 34.132,
"train_steps_per_second": 0.266
}
],
"logging_steps": 10,
"max_steps": 468,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 140358569164800.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
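
A minimal sketch (not part of this repository) of how a trainer_state.json like the one above could be loaded and inspected with the Python standard library and matplotlib; the file path, the printed summary, and the plot output name are illustrative assumptions, not anything the repository specifies.

import json

import matplotlib.pyplot as plt

# Load the Trainer state written by Hugging Face Transformers.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history holds one dict per logging interval; the final entry carries
# aggregate stats (train_loss, train_runtime, ...) instead of a per-step loss,
# so filter on the keys actually present in each entry.
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]

print(f"logged points: {len(steps)}, final logged loss: {losses[-1]:.4f}")

# Plot the training-loss curve against the global step.
fig, ax = plt.subplots()
ax.plot(steps, losses, label="train loss")
ax.set_xlabel("global step")
ax.set_ylabel("loss")
ax.legend(loc="upper right")
plt.tight_layout()
plt.savefig("loss_curve.png")  # hypothetical output filename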