{
"best_global_step": 1000,
"best_metric": 0.8179630041122437,
"best_model_checkpoint": "./biomistral-lora-finetuned/checkpoint-1000",
"epoch": 1.0823290453622207,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010832769126607989,
"grad_norm": 0.7727395296096802,
"learning_rate": 1.8e-05,
"loss": 0.889,
"step": 10
},
{
"epoch": 0.021665538253215978,
"grad_norm": 0.8008129596710205,
"learning_rate": 3.8e-05,
"loss": 0.8378,
"step": 20
},
{
"epoch": 0.03249830737982397,
"grad_norm": 0.9147247076034546,
"learning_rate": 5.8e-05,
"loss": 0.8108,
"step": 30
},
{
"epoch": 0.043331076506431955,
"grad_norm": 0.8121607303619385,
"learning_rate": 7.800000000000001e-05,
"loss": 0.8597,
"step": 40
},
{
"epoch": 0.05416384563303995,
"grad_norm": 1.0018593072891235,
"learning_rate": 9.8e-05,
"loss": 0.7486,
"step": 50
},
{
"epoch": 0.06499661475964794,
"grad_norm": 1.2048218250274658,
"learning_rate": 0.000118,
"loss": 0.6825,
"step": 60
},
{
"epoch": 0.07582938388625593,
"grad_norm": 0.9863468408584595,
"learning_rate": 0.000138,
"loss": 0.6539,
"step": 70
},
{
"epoch": 0.08666215301286391,
"grad_norm": 1.2911494970321655,
"learning_rate": 0.00015800000000000002,
"loss": 0.6198,
"step": 80
},
{
"epoch": 0.0974949221394719,
"grad_norm": 1.159672737121582,
"learning_rate": 0.00017800000000000002,
"loss": 0.6222,
"step": 90
},
{
"epoch": 0.1083276912660799,
"grad_norm": 1.0924432277679443,
"learning_rate": 0.00019800000000000002,
"loss": 0.5923,
"step": 100
},
{
"epoch": 0.11916046039268788,
"grad_norm": 1.3423463106155396,
"learning_rate": 0.00019932634730538925,
"loss": 0.5548,
"step": 110
},
{
"epoch": 0.12999322951929587,
"grad_norm": 1.4929102659225464,
"learning_rate": 0.00019857784431137723,
"loss": 0.6701,
"step": 120
},
{
"epoch": 0.14082599864590387,
"grad_norm": 0.9462954998016357,
"learning_rate": 0.00019782934131736527,
"loss": 0.8675,
"step": 130
},
{
"epoch": 0.15165876777251186,
"grad_norm": 0.9912289977073669,
"learning_rate": 0.0001970808383233533,
"loss": 0.9074,
"step": 140
},
{
"epoch": 0.16249153689911983,
"grad_norm": 1.1070538759231567,
"learning_rate": 0.00019633233532934132,
"loss": 0.8755,
"step": 150
},
{
"epoch": 0.17332430602572782,
"grad_norm": 0.9465340375900269,
"learning_rate": 0.00019558383233532936,
"loss": 0.882,
"step": 160
},
{
"epoch": 0.18415707515233581,
"grad_norm": 0.8657329678535461,
"learning_rate": 0.00019483532934131737,
"loss": 0.8737,
"step": 170
},
{
"epoch": 0.1949898442789438,
"grad_norm": 0.7293577790260315,
"learning_rate": 0.0001940868263473054,
"loss": 0.8473,
"step": 180
},
{
"epoch": 0.2058226134055518,
"grad_norm": 0.849353551864624,
"learning_rate": 0.00019333832335329343,
"loss": 0.9414,
"step": 190
},
{
"epoch": 0.2166553825321598,
"grad_norm": 0.7525314688682556,
"learning_rate": 0.00019258982035928144,
"loss": 0.8852,
"step": 200
},
{
"epoch": 0.22748815165876776,
"grad_norm": 1.0732208490371704,
"learning_rate": 0.00019184131736526948,
"loss": 0.8074,
"step": 210
},
{
"epoch": 0.23832092078537576,
"grad_norm": 0.8420374393463135,
"learning_rate": 0.0001910928143712575,
"loss": 0.9508,
"step": 220
},
{
"epoch": 0.24915368991198375,
"grad_norm": 0.8308244347572327,
"learning_rate": 0.0001903443113772455,
"loss": 0.8734,
"step": 230
},
{
"epoch": 0.25998645903859174,
"grad_norm": 0.9915153384208679,
"learning_rate": 0.00018959580838323354,
"loss": 0.8816,
"step": 240
},
{
"epoch": 0.2708192281651997,
"grad_norm": 4.8621978759765625,
"learning_rate": 0.00018884730538922158,
"loss": 0.8848,
"step": 250
},
{
"epoch": 0.28165199729180773,
"grad_norm": 0.7945590019226074,
"learning_rate": 0.0001880988023952096,
"loss": 0.8503,
"step": 260
},
{
"epoch": 0.2924847664184157,
"grad_norm": 0.7896672487258911,
"learning_rate": 0.00018735029940119763,
"loss": 0.8798,
"step": 270
},
{
"epoch": 0.3033175355450237,
"grad_norm": 0.8870701789855957,
"learning_rate": 0.00018660179640718564,
"loss": 0.9112,
"step": 280
},
{
"epoch": 0.3141503046716317,
"grad_norm": 0.9003740549087524,
"learning_rate": 0.00018585329341317365,
"loss": 0.846,
"step": 290
},
{
"epoch": 0.32498307379823965,
"grad_norm": 0.7067676186561584,
"learning_rate": 0.0001851047904191617,
"loss": 0.8588,
"step": 300
},
{
"epoch": 0.3358158429248477,
"grad_norm": 0.9696246385574341,
"learning_rate": 0.0001843562874251497,
"loss": 0.8244,
"step": 310
},
{
"epoch": 0.34664861205145564,
"grad_norm": 0.9892609715461731,
"learning_rate": 0.00018360778443113774,
"loss": 0.8214,
"step": 320
},
{
"epoch": 0.35748138117806366,
"grad_norm": 0.822260856628418,
"learning_rate": 0.00018285928143712575,
"loss": 0.7977,
"step": 330
},
{
"epoch": 0.36831415030467163,
"grad_norm": 0.7743964791297913,
"learning_rate": 0.00018211077844311376,
"loss": 0.8002,
"step": 340
},
{
"epoch": 0.3791469194312796,
"grad_norm": 0.7090775370597839,
"learning_rate": 0.0001813622754491018,
"loss": 0.8192,
"step": 350
},
{
"epoch": 0.3899796885578876,
"grad_norm": 1.0970802307128906,
"learning_rate": 0.00018061377245508984,
"loss": 0.8516,
"step": 360
},
{
"epoch": 0.4008124576844956,
"grad_norm": 0.9633163213729858,
"learning_rate": 0.00017986526946107785,
"loss": 0.8414,
"step": 370
},
{
"epoch": 0.4116452268111036,
"grad_norm": 0.6846926808357239,
"learning_rate": 0.00017911676646706587,
"loss": 0.8187,
"step": 380
},
{
"epoch": 0.42247799593771157,
"grad_norm": 0.7262110710144043,
"learning_rate": 0.0001783682634730539,
"loss": 0.8572,
"step": 390
},
{
"epoch": 0.4333107650643196,
"grad_norm": 0.8537372350692749,
"learning_rate": 0.00017761976047904192,
"loss": 0.8286,
"step": 400
},
{
"epoch": 0.44414353419092756,
"grad_norm": 0.8860271573066711,
"learning_rate": 0.00017687125748502996,
"loss": 0.8416,
"step": 410
},
{
"epoch": 0.4549763033175355,
"grad_norm": 0.7984218597412109,
"learning_rate": 0.000176122754491018,
"loss": 0.8373,
"step": 420
},
{
"epoch": 0.46580907244414355,
"grad_norm": 0.8060943484306335,
"learning_rate": 0.000175374251497006,
"loss": 0.9165,
"step": 430
},
{
"epoch": 0.4766418415707515,
"grad_norm": 0.7871391177177429,
"learning_rate": 0.00017462574850299402,
"loss": 0.8276,
"step": 440
},
{
"epoch": 0.48747461069735953,
"grad_norm": 0.7732688784599304,
"learning_rate": 0.00017387724550898203,
"loss": 0.8346,
"step": 450
},
{
"epoch": 0.4983073798239675,
"grad_norm": 0.9314000606536865,
"learning_rate": 0.00017312874251497007,
"loss": 0.8291,
"step": 460
},
{
"epoch": 0.5091401489505755,
"grad_norm": 0.6721988916397095,
"learning_rate": 0.0001723802395209581,
"loss": 0.7091,
"step": 470
},
{
"epoch": 0.5199729180771835,
"grad_norm": 0.825965940952301,
"learning_rate": 0.00017163173652694612,
"loss": 0.8934,
"step": 480
},
{
"epoch": 0.5308056872037915,
"grad_norm": 0.8427668213844299,
"learning_rate": 0.00017088323353293413,
"loss": 0.7603,
"step": 490
},
{
"epoch": 0.5416384563303994,
"grad_norm": 1.0061259269714355,
"learning_rate": 0.00017013473053892217,
"loss": 0.8277,
"step": 500
},
{
"epoch": 0.5416384563303994,
"eval_loss": 0.8331602811813354,
"eval_runtime": 355.9061,
"eval_samples_per_second": 4.614,
"eval_steps_per_second": 2.307,
"step": 500
},
{
"epoch": 0.5524712254570074,
"grad_norm": 0.8820628523826599,
"learning_rate": 0.00016938622754491018,
"loss": 0.8348,
"step": 510
},
{
"epoch": 0.5633039945836155,
"grad_norm": 0.8095284700393677,
"learning_rate": 0.00016863772455089822,
"loss": 0.9172,
"step": 520
},
{
"epoch": 0.5741367637102234,
"grad_norm": 0.6959540843963623,
"learning_rate": 0.00016788922155688623,
"loss": 0.838,
"step": 530
},
{
"epoch": 0.5849695328368314,
"grad_norm": 0.835831880569458,
"learning_rate": 0.00016714071856287424,
"loss": 0.8887,
"step": 540
},
{
"epoch": 0.5958023019634394,
"grad_norm": 0.9289611577987671,
"learning_rate": 0.00016639221556886228,
"loss": 0.8514,
"step": 550
},
{
"epoch": 0.6066350710900474,
"grad_norm": 0.6904628872871399,
"learning_rate": 0.00016564371257485032,
"loss": 0.8645,
"step": 560
},
{
"epoch": 0.6174678402166554,
"grad_norm": 0.8879178762435913,
"learning_rate": 0.00016489520958083833,
"loss": 0.8201,
"step": 570
},
{
"epoch": 0.6283006093432634,
"grad_norm": 0.8411425948143005,
"learning_rate": 0.00016414670658682637,
"loss": 0.836,
"step": 580
},
{
"epoch": 0.6391333784698714,
"grad_norm": 0.8564555644989014,
"learning_rate": 0.00016339820359281436,
"loss": 0.7724,
"step": 590
},
{
"epoch": 0.6499661475964793,
"grad_norm": 0.8382830619812012,
"learning_rate": 0.0001626497005988024,
"loss": 0.7839,
"step": 600
},
{
"epoch": 0.6607989167230873,
"grad_norm": 0.7657437920570374,
"learning_rate": 0.00016190119760479043,
"loss": 0.7973,
"step": 610
},
{
"epoch": 0.6716316858496953,
"grad_norm": 0.7758445143699646,
"learning_rate": 0.00016115269461077845,
"loss": 0.8111,
"step": 620
},
{
"epoch": 0.6824644549763034,
"grad_norm": 1.0041533708572388,
"learning_rate": 0.00016040419161676649,
"loss": 0.8359,
"step": 630
},
{
"epoch": 0.6932972241029113,
"grad_norm": 0.9679577946662903,
"learning_rate": 0.0001596556886227545,
"loss": 0.8822,
"step": 640
},
{
"epoch": 0.7041299932295193,
"grad_norm": 0.8141391277313232,
"learning_rate": 0.0001589071856287425,
"loss": 0.8714,
"step": 650
},
{
"epoch": 0.7149627623561273,
"grad_norm": 0.7982810139656067,
"learning_rate": 0.00015815868263473055,
"loss": 0.856,
"step": 660
},
{
"epoch": 0.7257955314827352,
"grad_norm": 0.7932000160217285,
"learning_rate": 0.00015741017964071859,
"loss": 0.8405,
"step": 670
},
{
"epoch": 0.7366283006093433,
"grad_norm": 0.7269508242607117,
"learning_rate": 0.0001566616766467066,
"loss": 0.8371,
"step": 680
},
{
"epoch": 0.7474610697359513,
"grad_norm": 0.9001722931861877,
"learning_rate": 0.0001559131736526946,
"loss": 0.8305,
"step": 690
},
{
"epoch": 0.7582938388625592,
"grad_norm": 0.6795508861541748,
"learning_rate": 0.00015516467065868262,
"loss": 0.8324,
"step": 700
},
{
"epoch": 0.7691266079891672,
"grad_norm": 0.8868729472160339,
"learning_rate": 0.00015441616766467066,
"loss": 0.8521,
"step": 710
},
{
"epoch": 0.7799593771157752,
"grad_norm": 0.9720478653907776,
"learning_rate": 0.0001536676646706587,
"loss": 0.7759,
"step": 720
},
{
"epoch": 0.7907921462423833,
"grad_norm": 0.8006075620651245,
"learning_rate": 0.0001529191616766467,
"loss": 0.7981,
"step": 730
},
{
"epoch": 0.8016249153689912,
"grad_norm": 0.9107721447944641,
"learning_rate": 0.00015217065868263475,
"loss": 0.7868,
"step": 740
},
{
"epoch": 0.8124576844955992,
"grad_norm": 0.7584466338157654,
"learning_rate": 0.00015142215568862276,
"loss": 0.7401,
"step": 750
},
{
"epoch": 0.8232904536222072,
"grad_norm": 1.0075221061706543,
"learning_rate": 0.00015067365269461077,
"loss": 0.8024,
"step": 760
},
{
"epoch": 0.8341232227488151,
"grad_norm": 0.8769344091415405,
"learning_rate": 0.0001499251497005988,
"loss": 0.7779,
"step": 770
},
{
"epoch": 0.8449559918754231,
"grad_norm": 0.84312903881073,
"learning_rate": 0.00014917664670658685,
"loss": 0.8314,
"step": 780
},
{
"epoch": 0.8557887610020312,
"grad_norm": 0.8116353750228882,
"learning_rate": 0.00014842814371257486,
"loss": 0.8146,
"step": 790
},
{
"epoch": 0.8666215301286392,
"grad_norm": 0.8301011919975281,
"learning_rate": 0.00014767964071856287,
"loss": 0.7422,
"step": 800
},
{
"epoch": 0.8774542992552471,
"grad_norm": 0.8579692244529724,
"learning_rate": 0.00014693113772455091,
"loss": 0.7442,
"step": 810
},
{
"epoch": 0.8882870683818551,
"grad_norm": 0.7513943910598755,
"learning_rate": 0.00014618263473053893,
"loss": 0.7671,
"step": 820
},
{
"epoch": 0.8991198375084631,
"grad_norm": 0.9639107584953308,
"learning_rate": 0.00014543413173652696,
"loss": 0.7896,
"step": 830
},
{
"epoch": 0.909952606635071,
"grad_norm": 0.8897636532783508,
"learning_rate": 0.00014468562874251498,
"loss": 0.7613,
"step": 840
},
{
"epoch": 0.9207853757616791,
"grad_norm": 0.7998213171958923,
"learning_rate": 0.000143937125748503,
"loss": 0.7647,
"step": 850
},
{
"epoch": 0.9316181448882871,
"grad_norm": 0.6916050910949707,
"learning_rate": 0.00014318862275449103,
"loss": 0.7697,
"step": 860
},
{
"epoch": 0.942450914014895,
"grad_norm": 1.0154324769973755,
"learning_rate": 0.00014244011976047904,
"loss": 0.7314,
"step": 870
},
{
"epoch": 0.953283683141503,
"grad_norm": 0.9787517786026001,
"learning_rate": 0.00014169161676646708,
"loss": 0.8047,
"step": 880
},
{
"epoch": 0.964116452268111,
"grad_norm": 0.6035457253456116,
"learning_rate": 0.00014094311377245512,
"loss": 0.783,
"step": 890
},
{
"epoch": 0.9749492213947191,
"grad_norm": 0.940951943397522,
"learning_rate": 0.0001401946107784431,
"loss": 0.7741,
"step": 900
},
{
"epoch": 0.985781990521327,
"grad_norm": 0.7785654067993164,
"learning_rate": 0.00013944610778443114,
"loss": 0.7855,
"step": 910
},
{
"epoch": 0.996614759647935,
"grad_norm": 0.8356137275695801,
"learning_rate": 0.00013869760479041918,
"loss": 0.8292,
"step": 920
},
{
"epoch": 1.0064996614759647,
"grad_norm": 0.6590499877929688,
"learning_rate": 0.0001379491017964072,
"loss": 0.6858,
"step": 930
},
{
"epoch": 1.0173324306025728,
"grad_norm": 1.0389671325683594,
"learning_rate": 0.00013720059880239523,
"loss": 0.6097,
"step": 940
},
{
"epoch": 1.0281651997291807,
"grad_norm": 0.9596243500709534,
"learning_rate": 0.00013645209580838324,
"loss": 0.5676,
"step": 950
},
{
"epoch": 1.0389979688557887,
"grad_norm": 1.0831798315048218,
"learning_rate": 0.00013570359281437125,
"loss": 0.6106,
"step": 960
},
{
"epoch": 1.0498307379823968,
"grad_norm": 0.92978835105896,
"learning_rate": 0.0001349550898203593,
"loss": 0.5924,
"step": 970
},
{
"epoch": 1.0606635071090047,
"grad_norm": 0.9672062993049622,
"learning_rate": 0.0001342065868263473,
"loss": 0.5496,
"step": 980
},
{
"epoch": 1.0714962762356128,
"grad_norm": 1.1402652263641357,
"learning_rate": 0.00013345808383233534,
"loss": 0.5871,
"step": 990
},
{
"epoch": 1.0823290453622207,
"grad_norm": 1.1109035015106201,
"learning_rate": 0.00013270958083832335,
"loss": 0.5424,
"step": 1000
},
{
"epoch": 1.0823290453622207,
"eval_loss": 0.8179630041122437,
"eval_runtime": 357.2769,
"eval_samples_per_second": 4.596,
"eval_steps_per_second": 2.298,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 2772,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.523118244330209e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}