{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 1175,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0425531914893617,
"grad_norm": 0.49684813618659973,
"learning_rate": 0.00019999602855426865,
"loss": 1.1262,
"step": 10
},
{
"epoch": 0.0851063829787234,
"grad_norm": 0.43794313073158264,
"learning_rate": 0.00019998411453252217,
"loss": 0.8855,
"step": 20
},
{
"epoch": 0.1276595744680851,
"grad_norm": 0.7506595253944397,
"learning_rate": 0.0001999642588810784,
"loss": 0.652,
"step": 30
},
{
"epoch": 0.1702127659574468,
"grad_norm": 0.57978355884552,
"learning_rate": 0.00019993646317705016,
"loss": 0.4779,
"step": 40
},
{
"epoch": 0.2127659574468085,
"grad_norm": 0.583155632019043,
"learning_rate": 0.00019990072962822007,
"loss": 0.4035,
"step": 50
},
{
"epoch": 0.2553191489361702,
"grad_norm": 0.6853554844856262,
"learning_rate": 0.00019985706107286514,
"loss": 0.3479,
"step": 60
},
{
"epoch": 0.2978723404255319,
"grad_norm": 0.7098686099052429,
"learning_rate": 0.00019980546097953132,
"loss": 0.2903,
"step": 70
},
{
"epoch": 0.3404255319148936,
"grad_norm": 0.6163254380226135,
"learning_rate": 0.000199745933446758,
"loss": 0.2884,
"step": 80
},
{
"epoch": 0.3829787234042553,
"grad_norm": 1.0676591396331787,
"learning_rate": 0.0001996784832027525,
"loss": 0.2821,
"step": 90
},
{
"epoch": 0.425531914893617,
"grad_norm": 0.9656098484992981,
"learning_rate": 0.00019960311560501454,
"loss": 0.2012,
"step": 100
},
{
"epoch": 0.46808510638297873,
"grad_norm": 0.7761662602424622,
"learning_rate": 0.00019951983663991056,
"loss": 0.2072,
"step": 110
},
{
"epoch": 0.5106382978723404,
"grad_norm": 0.7839683294296265,
"learning_rate": 0.00019942865292219838,
"loss": 0.1505,
"step": 120
},
{
"epoch": 0.5531914893617021,
"grad_norm": 0.6823858618736267,
"learning_rate": 0.0001993295716945017,
"loss": 0.1818,
"step": 130
},
{
"epoch": 0.5957446808510638,
"grad_norm": 0.579463005065918,
"learning_rate": 0.00019922260082673497,
"loss": 0.1059,
"step": 140
},
{
"epoch": 0.6382978723404256,
"grad_norm": 1.7094753980636597,
"learning_rate": 0.000199107748815478,
"loss": 0.1426,
"step": 150
},
{
"epoch": 0.6808510638297872,
"grad_norm": 0.9202602505683899,
"learning_rate": 0.00019898502478330152,
"loss": 0.0965,
"step": 160
},
{
"epoch": 0.723404255319149,
"grad_norm": 0.37124085426330566,
"learning_rate": 0.00019885443847804211,
"loss": 0.1027,
"step": 170
},
{
"epoch": 0.7659574468085106,
"grad_norm": 0.4608505666255951,
"learning_rate": 0.0001987160002720283,
"loss": 0.0691,
"step": 180
},
{
"epoch": 0.8085106382978723,
"grad_norm": 0.3972225487232208,
"learning_rate": 0.00019856972116125653,
"loss": 0.0891,
"step": 190
},
{
"epoch": 0.851063829787234,
"grad_norm": 0.8271582126617432,
"learning_rate": 0.0001984156127645178,
"loss": 0.0726,
"step": 200
},
{
"epoch": 0.8936170212765957,
"grad_norm": 0.889887809753418,
"learning_rate": 0.0001982536873224748,
"loss": 0.1066,
"step": 210
},
{
"epoch": 0.9361702127659575,
"grad_norm": 0.7588837742805481,
"learning_rate": 0.00019808395769668963,
"loss": 0.0511,
"step": 220
},
{
"epoch": 0.9787234042553191,
"grad_norm": 0.3827434480190277,
"learning_rate": 0.00019790643736860227,
"loss": 0.0746,
"step": 230
},
{
"epoch": 1.0212765957446808,
"grad_norm": 1.043514609336853,
"learning_rate": 0.00019772114043845965,
"loss": 0.059,
"step": 240
},
{
"epoch": 1.0638297872340425,
"grad_norm": 0.5586545467376709,
"learning_rate": 0.0001975280816241959,
"loss": 0.0457,
"step": 250
},
{
"epoch": 1.1063829787234043,
"grad_norm": 0.6704376935958862,
"learning_rate": 0.00019732727626026305,
"loss": 0.0572,
"step": 260
},
{
"epoch": 1.148936170212766,
"grad_norm": 0.19444280862808228,
"learning_rate": 0.0001971187402964132,
"loss": 0.037,
"step": 270
},
{
"epoch": 1.1914893617021276,
"grad_norm": 0.3722103238105774,
"learning_rate": 0.00019690249029643162,
"loss": 0.0716,
"step": 280
},
{
"epoch": 1.2340425531914894,
"grad_norm": 0.50620037317276,
"learning_rate": 0.0001966785434368211,
"loss": 0.0346,
"step": 290
},
{
"epoch": 1.2765957446808511,
"grad_norm": 0.49648305773735046,
"learning_rate": 0.00019644691750543767,
"loss": 0.0288,
"step": 300
},
{
"epoch": 1.3191489361702127,
"grad_norm": 0.3201454281806946,
"learning_rate": 0.00019620763090007762,
"loss": 0.0317,
"step": 310
},
{
"epoch": 1.3617021276595744,
"grad_norm": 0.2545865774154663,
"learning_rate": 0.00019596070262701626,
"loss": 0.0166,
"step": 320
},
{
"epoch": 1.4042553191489362,
"grad_norm": 0.23364359140396118,
"learning_rate": 0.00019570615229949842,
"loss": 0.0357,
"step": 330
},
{
"epoch": 1.4468085106382977,
"grad_norm": 0.4658994972705841,
"learning_rate": 0.00019544400013618023,
"loss": 0.0319,
"step": 340
},
{
"epoch": 1.4893617021276595,
"grad_norm": 0.11836043000221252,
"learning_rate": 0.00019517426695952358,
"loss": 0.0228,
"step": 350
},
{
"epoch": 1.5319148936170213,
"grad_norm": 0.7419282793998718,
"learning_rate": 0.00019489697419414182,
"loss": 0.0323,
"step": 360
},
{
"epoch": 1.574468085106383,
"grad_norm": 0.8870681524276733,
"learning_rate": 0.00019461214386509842,
"loss": 0.0493,
"step": 370
},
{
"epoch": 1.6170212765957448,
"grad_norm": 0.5847530961036682,
"learning_rate": 0.00019431979859615726,
"loss": 0.0487,
"step": 380
},
{
"epoch": 1.6595744680851063,
"grad_norm": 0.9245363473892212,
"learning_rate": 0.00019401996160798573,
"loss": 0.0634,
"step": 390
},
{
"epoch": 1.702127659574468,
"grad_norm": 0.5757696032524109,
"learning_rate": 0.00019371265671631037,
"loss": 0.0439,
"step": 400
},
{
"epoch": 1.7446808510638299,
"grad_norm": 0.5014369487762451,
"learning_rate": 0.00019339790833002515,
"loss": 0.0316,
"step": 410
},
{
"epoch": 1.7872340425531914,
"grad_norm": 0.49838268756866455,
"learning_rate": 0.00019307574144925287,
"loss": 0.0535,
"step": 420
},
{
"epoch": 1.8297872340425532,
"grad_norm": 0.848798930644989,
"learning_rate": 0.00019274618166335912,
"loss": 0.0396,
"step": 430
},
{
"epoch": 1.872340425531915,
"grad_norm": 0.41701218485832214,
"learning_rate": 0.00019240925514892,
"loss": 0.027,
"step": 440
},
{
"epoch": 1.9148936170212765,
"grad_norm": 1.0935739278793335,
"learning_rate": 0.00019206498866764288,
"loss": 0.0321,
"step": 450
},
{
"epoch": 1.9574468085106385,
"grad_norm": 0.7538416385650635,
"learning_rate": 0.00019171340956424074,
"loss": 0.0221,
"step": 460
},
{
"epoch": 2.0,
"grad_norm": 0.4453732371330261,
"learning_rate": 0.0001913545457642601,
"loss": 0.0345,
"step": 470
},
{
"epoch": 2.0425531914893615,
"grad_norm": 0.2772938311100006,
"learning_rate": 0.00019098842577186314,
"loss": 0.0185,
"step": 480
},
{
"epoch": 2.0851063829787235,
"grad_norm": 0.12744171917438507,
"learning_rate": 0.00019061507866756347,
"loss": 0.0213,
"step": 490
},
{
"epoch": 2.127659574468085,
"grad_norm": 0.29645049571990967,
"learning_rate": 0.00019023453410591635,
"loss": 0.0257,
"step": 500
},
{
"epoch": 2.1702127659574466,
"grad_norm": 0.22986823320388794,
"learning_rate": 0.00018984682231316333,
"loss": 0.0099,
"step": 510
},
{
"epoch": 2.2127659574468086,
"grad_norm": 0.19042347371578217,
"learning_rate": 0.00018945197408483123,
"loss": 0.0163,
"step": 520
},
{
"epoch": 2.25531914893617,
"grad_norm": 0.13396915793418884,
"learning_rate": 0.00018905002078328632,
"loss": 0.0225,
"step": 530
},
{
"epoch": 2.297872340425532,
"grad_norm": 0.08411907404661179,
"learning_rate": 0.000188640994335243,
"loss": 0.0158,
"step": 540
},
{
"epoch": 2.3404255319148937,
"grad_norm": 0.16173473000526428,
"learning_rate": 0.0001882249272292282,
"loss": 0.0164,
"step": 550
},
{
"epoch": 2.382978723404255,
"grad_norm": 0.3895552158355713,
"learning_rate": 0.00018780185251300046,
"loss": 0.0255,
"step": 560
},
{
"epoch": 2.425531914893617,
"grad_norm": 0.42386066913604736,
"learning_rate": 0.00018737180379092537,
"loss": 0.0315,
"step": 570
},
{
"epoch": 2.4680851063829787,
"grad_norm": 0.10213664919137955,
"learning_rate": 0.0001869348152213061,
"loss": 0.0321,
"step": 580
},
{
"epoch": 2.5106382978723403,
"grad_norm": 0.27683985233306885,
"learning_rate": 0.0001864909215136705,
"loss": 0.0243,
"step": 590
},
{
"epoch": 2.5531914893617023,
"grad_norm": 0.7269228100776672,
"learning_rate": 0.00018604015792601396,
"loss": 0.0276,
"step": 600
},
{
"epoch": 2.595744680851064,
"grad_norm": 0.23035497963428497,
"learning_rate": 0.00018558256026199896,
"loss": 0.0208,
"step": 610
},
{
"epoch": 2.6382978723404253,
"grad_norm": 0.334359347820282,
"learning_rate": 0.00018511816486811134,
"loss": 0.0228,
"step": 620
},
{
"epoch": 2.6808510638297873,
"grad_norm": 0.07350382953882217,
"learning_rate": 0.00018464700863077312,
"loss": 0.0295,
"step": 630
},
{
"epoch": 2.723404255319149,
"grad_norm": 0.39336535334587097,
"learning_rate": 0.00018416912897341295,
"loss": 0.0289,
"step": 640
},
{
"epoch": 2.7659574468085104,
"grad_norm": 0.15300992131233215,
"learning_rate": 0.00018368456385349334,
"loss": 0.0183,
"step": 650
},
{
"epoch": 2.8085106382978724,
"grad_norm": 0.07117566466331482,
"learning_rate": 0.0001831933517594957,
"loss": 0.0345,
"step": 660
},
{
"epoch": 2.851063829787234,
"grad_norm": 0.1297655552625656,
"learning_rate": 0.0001826955317078636,
"loss": 0.0171,
"step": 670
},
{
"epoch": 2.8936170212765955,
"grad_norm": 0.361150324344635,
"learning_rate": 0.00018219114323990345,
"loss": 0.0174,
"step": 680
},
{
"epoch": 2.9361702127659575,
"grad_norm": 0.7295799851417542,
"learning_rate": 0.00018168022641864377,
"loss": 0.0226,
"step": 690
},
{
"epoch": 2.978723404255319,
"grad_norm": 0.06540104746818542,
"learning_rate": 0.00018116282182565311,
"loss": 0.0374,
"step": 700
},
{
"epoch": 3.021276595744681,
"grad_norm": 0.6617436408996582,
"learning_rate": 0.0001806389705578168,
"loss": 0.0136,
"step": 710
},
{
"epoch": 3.0638297872340425,
"grad_norm": 0.28398895263671875,
"learning_rate": 0.00018010871422407236,
"loss": 0.0185,
"step": 720
},
{
"epoch": 3.106382978723404,
"grad_norm": 0.07346879690885544,
"learning_rate": 0.00017957209494210493,
"loss": 0.0197,
"step": 730
},
{
"epoch": 3.148936170212766,
"grad_norm": 0.06647660583257675,
"learning_rate": 0.0001790291553350016,
"loss": 0.0093,
"step": 740
},
{
"epoch": 3.1914893617021276,
"grad_norm": 0.0874147042632103,
"learning_rate": 0.0001784799385278661,
"loss": 0.01,
"step": 750
},
{
"epoch": 3.2340425531914896,
"grad_norm": 0.45789024233818054,
"learning_rate": 0.00017792448814439333,
"loss": 0.0115,
"step": 760
},
{
"epoch": 3.276595744680851,
"grad_norm": 0.3800203204154968,
"learning_rate": 0.00017736284830340436,
"loss": 0.0209,
"step": 770
},
{
"epoch": 3.3191489361702127,
"grad_norm": 0.2505127191543579,
"learning_rate": 0.00017679506361534215,
"loss": 0.0167,
"step": 780
},
{
"epoch": 3.3617021276595747,
"grad_norm": 0.10206060856580734,
"learning_rate": 0.00017622117917872823,
"loss": 0.0172,
"step": 790
},
{
"epoch": 3.404255319148936,
"grad_norm": 0.45168524980545044,
"learning_rate": 0.00017564124057658056,
"loss": 0.0284,
"step": 800
},
{
"epoch": 3.4468085106382977,
"grad_norm": 0.44172531366348267,
"learning_rate": 0.00017505529387279277,
"loss": 0.0289,
"step": 810
},
{
"epoch": 3.4893617021276597,
"grad_norm": 0.20968879759311676,
"learning_rate": 0.00017446338560847568,
"loss": 0.0163,
"step": 820
},
{
"epoch": 3.5319148936170213,
"grad_norm": 0.2968202829360962,
"learning_rate": 0.00017386556279826021,
"loss": 0.0231,
"step": 830
},
{
"epoch": 3.574468085106383,
"grad_norm": 0.2778526246547699,
"learning_rate": 0.00017326187292656333,
"loss": 0.0157,
"step": 840
},
{
"epoch": 3.617021276595745,
"grad_norm": 0.08669642359018326,
"learning_rate": 0.00017265236394381633,
"loss": 0.0148,
"step": 850
},
{
"epoch": 3.6595744680851063,
"grad_norm": 0.06219767406582832,
"learning_rate": 0.00017203708426265614,
"loss": 0.0161,
"step": 860
},
{
"epoch": 3.702127659574468,
"grad_norm": 0.07938496023416519,
"learning_rate": 0.00017141608275408006,
"loss": 0.0145,
"step": 870
},
{
"epoch": 3.74468085106383,
"grad_norm": 0.4254000186920166,
"learning_rate": 0.00017078940874356392,
"loss": 0.0133,
"step": 880
},
{
"epoch": 3.7872340425531914,
"grad_norm": 0.2674030065536499,
"learning_rate": 0.00017015711200714414,
"loss": 0.0162,
"step": 890
},
{
"epoch": 3.829787234042553,
"grad_norm": 0.08741254359483719,
"learning_rate": 0.00016951924276746425,
"loss": 0.0208,
"step": 900
},
{
"epoch": 3.872340425531915,
"grad_norm": 0.09643790125846863,
"learning_rate": 0.00016887585168978562,
"loss": 0.0109,
"step": 910
},
{
"epoch": 3.9148936170212765,
"grad_norm": 0.2952460050582886,
"learning_rate": 0.0001682269898779632,
"loss": 0.0154,
"step": 920
},
{
"epoch": 3.9574468085106385,
"grad_norm": 0.2182932198047638,
"learning_rate": 0.00016757270887038654,
"loss": 0.0156,
"step": 930
},
{
"epoch": 4.0,
"grad_norm": 0.3799554109573364,
"learning_rate": 0.00016691306063588583,
"loss": 0.014,
"step": 940
},
{
"epoch": 4.042553191489362,
"grad_norm": 0.3694469630718231,
"learning_rate": 0.00016624809756960444,
"loss": 0.0114,
"step": 950
},
{
"epoch": 4.085106382978723,
"grad_norm": 0.42959412932395935,
"learning_rate": 0.00016557787248883696,
"loss": 0.0127,
"step": 960
},
{
"epoch": 4.127659574468085,
"grad_norm": 0.25515562295913696,
"learning_rate": 0.00016490243862883413,
"loss": 0.0145,
"step": 970
},
{
"epoch": 4.170212765957447,
"grad_norm": 0.1240379586815834,
"learning_rate": 0.00016422184963857432,
"loss": 0.012,
"step": 980
},
{
"epoch": 4.212765957446808,
"grad_norm": 0.3189831078052521,
"learning_rate": 0.00016353615957650236,
"loss": 0.012,
"step": 990
},
{
"epoch": 4.25531914893617,
"grad_norm": 0.08477319031953812,
"learning_rate": 0.00016284542290623567,
"loss": 0.0103,
"step": 1000
},
{
"epoch": 4.297872340425532,
"grad_norm": 0.0562283881008625,
"learning_rate": 0.00016214969449223824,
"loss": 0.0159,
"step": 1010
},
{
"epoch": 4.340425531914893,
"grad_norm": 0.07840266078710556,
"learning_rate": 0.00016144902959546286,
"loss": 0.011,
"step": 1020
},
{
"epoch": 4.382978723404255,
"grad_norm": 0.33640316128730774,
"learning_rate": 0.00016074348386896177,
"loss": 0.0194,
"step": 1030
},
{
"epoch": 4.425531914893617,
"grad_norm": 0.06994381546974182,
"learning_rate": 0.00016003311335346636,
"loss": 0.0146,
"step": 1040
},
{
"epoch": 4.468085106382979,
"grad_norm": 0.4166668951511383,
"learning_rate": 0.00015931797447293552,
"loss": 0.0117,
"step": 1050
},
{
"epoch": 4.51063829787234,
"grad_norm": 0.10744202882051468,
"learning_rate": 0.00015859812403007443,
"loss": 0.0108,
"step": 1060
},
{
"epoch": 4.553191489361702,
"grad_norm": 0.06330662965774536,
"learning_rate": 0.0001578736192018224,
"loss": 0.0092,
"step": 1070
},
{
"epoch": 4.595744680851064,
"grad_norm": 0.6020157933235168,
"learning_rate": 0.00015714451753481168,
"loss": 0.0165,
"step": 1080
},
{
"epoch": 4.638297872340425,
"grad_norm": 0.07121607661247253,
"learning_rate": 0.0001564108769407962,
"loss": 0.0169,
"step": 1090
},
{
"epoch": 4.680851063829787,
"grad_norm": 0.3467644155025482,
"learning_rate": 0.00015567275569205218,
"loss": 0.0167,
"step": 1100
},
{
"epoch": 4.723404255319149,
"grad_norm": 0.2611311972141266,
"learning_rate": 0.00015493021241674918,
"loss": 0.0146,
"step": 1110
},
{
"epoch": 4.76595744680851,
"grad_norm": 0.35593244433403015,
"learning_rate": 0.0001541833060942937,
"loss": 0.0181,
"step": 1120
},
{
"epoch": 4.808510638297872,
"grad_norm": 0.05776326358318329,
"learning_rate": 0.00015343209605064422,
"loss": 0.0103,
"step": 1130
},
{
"epoch": 4.851063829787234,
"grad_norm": 0.07242272049188614,
"learning_rate": 0.00015267664195359917,
"loss": 0.009,
"step": 1140
},
{
"epoch": 4.8936170212765955,
"grad_norm": 0.07159385830163956,
"learning_rate": 0.00015191700380805752,
"loss": 0.0136,
"step": 1150
},
{
"epoch": 4.9361702127659575,
"grad_norm": 0.061515677720308304,
"learning_rate": 0.00015115324195125274,
"loss": 0.0118,
"step": 1160
},
{
"epoch": 4.9787234042553195,
"grad_norm": 0.4526319205760956,
"learning_rate": 0.00015038541704796003,
"loss": 0.0171,
"step": 1170
}
],
"logging_steps": 10,
"max_steps": 3525,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.71512405225472e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}