{
"best_metric": 0.7122384905815125,
"best_model_checkpoint": "/home/llmadmin/models/loras-trained/wizardLM-llama-lora-13b/checkpoint-1600",
"epoch": 2.8235294117647056,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2.9999999999999997e-05,
"loss": 1.2881,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 5.6999999999999996e-05,
"loss": 1.2633,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 8.699999999999999e-05,
"loss": 1.1882,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 0.000117,
"loss": 1.0592,
"step": 40
},
{
"epoch": 0.09,
"learning_rate": 0.000147,
"loss": 0.9732,
"step": 50
},
{
"epoch": 0.11,
"learning_rate": 0.00017699999999999997,
"loss": 0.8958,
"step": 60
},
{
"epoch": 0.12,
"learning_rate": 0.00020699999999999996,
"loss": 0.8443,
"step": 70
},
{
"epoch": 0.14,
"learning_rate": 0.000237,
"loss": 0.8324,
"step": 80
},
{
"epoch": 0.16,
"learning_rate": 0.000267,
"loss": 0.812,
"step": 90
},
{
"epoch": 0.18,
"learning_rate": 0.00029699999999999996,
"loss": 0.8052,
"step": 100
},
{
"epoch": 0.19,
"learning_rate": 0.0002983103879849812,
"loss": 0.7965,
"step": 110
},
{
"epoch": 0.21,
"learning_rate": 0.000296433041301627,
"loss": 0.7921,
"step": 120
},
{
"epoch": 0.23,
"learning_rate": 0.0002945556946182728,
"loss": 0.7823,
"step": 130
},
{
"epoch": 0.25,
"learning_rate": 0.0002926783479349186,
"loss": 0.7768,
"step": 140
},
{
"epoch": 0.26,
"learning_rate": 0.00029080100125156443,
"loss": 0.7813,
"step": 150
},
{
"epoch": 0.28,
"learning_rate": 0.00028892365456821025,
"loss": 0.7685,
"step": 160
},
{
"epoch": 0.3,
"learning_rate": 0.00028704630788485607,
"loss": 0.7754,
"step": 170
},
{
"epoch": 0.32,
"learning_rate": 0.00028516896120150183,
"loss": 0.7686,
"step": 180
},
{
"epoch": 0.34,
"learning_rate": 0.00028329161451814765,
"loss": 0.7662,
"step": 190
},
{
"epoch": 0.35,
"learning_rate": 0.0002814142678347935,
"loss": 0.7624,
"step": 200
},
{
"epoch": 0.35,
"eval_loss": 0.7555728554725647,
"eval_runtime": 65.5948,
"eval_samples_per_second": 30.49,
"eval_steps_per_second": 0.762,
"step": 200
},
{
"epoch": 0.37,
"learning_rate": 0.00027953692115143924,
"loss": 0.7592,
"step": 210
},
{
"epoch": 0.39,
"learning_rate": 0.00027765957446808506,
"loss": 0.769,
"step": 220
},
{
"epoch": 0.41,
"learning_rate": 0.0002757822277847309,
"loss": 0.7649,
"step": 230
},
{
"epoch": 0.42,
"learning_rate": 0.0002739048811013767,
"loss": 0.7539,
"step": 240
},
{
"epoch": 0.44,
"learning_rate": 0.0002720275344180225,
"loss": 0.7565,
"step": 250
},
{
"epoch": 0.46,
"learning_rate": 0.0002701501877346683,
"loss": 0.7441,
"step": 260
},
{
"epoch": 0.48,
"learning_rate": 0.0002682728410513141,
"loss": 0.7473,
"step": 270
},
{
"epoch": 0.49,
"learning_rate": 0.0002663954943679599,
"loss": 0.7518,
"step": 280
},
{
"epoch": 0.51,
"learning_rate": 0.00026451814768460575,
"loss": 0.7476,
"step": 290
},
{
"epoch": 0.53,
"learning_rate": 0.00026264080100125157,
"loss": 0.749,
"step": 300
},
{
"epoch": 0.55,
"learning_rate": 0.00026076345431789733,
"loss": 0.7521,
"step": 310
},
{
"epoch": 0.56,
"learning_rate": 0.00025888610763454315,
"loss": 0.7494,
"step": 320
},
{
"epoch": 0.58,
"learning_rate": 0.00025700876095118897,
"loss": 0.7466,
"step": 330
},
{
"epoch": 0.6,
"learning_rate": 0.0002551314142678348,
"loss": 0.7457,
"step": 340
},
{
"epoch": 0.62,
"learning_rate": 0.00025325406758448056,
"loss": 0.7563,
"step": 350
},
{
"epoch": 0.64,
"learning_rate": 0.0002513767209011264,
"loss": 0.7373,
"step": 360
},
{
"epoch": 0.65,
"learning_rate": 0.0002494993742177722,
"loss": 0.7335,
"step": 370
},
{
"epoch": 0.67,
"learning_rate": 0.000247622027534418,
"loss": 0.7361,
"step": 380
},
{
"epoch": 0.69,
"learning_rate": 0.00024574468085106384,
"loss": 0.7439,
"step": 390
},
{
"epoch": 0.71,
"learning_rate": 0.0002438673341677096,
"loss": 0.7454,
"step": 400
},
{
"epoch": 0.71,
"eval_loss": 0.7359363436698914,
"eval_runtime": 65.8338,
"eval_samples_per_second": 30.38,
"eval_steps_per_second": 0.759,
"step": 400
},
{
"epoch": 0.72,
"learning_rate": 0.00024198998748435542,
"loss": 0.7299,
"step": 410
},
{
"epoch": 0.74,
"learning_rate": 0.00024011264080100122,
"loss": 0.7483,
"step": 420
},
{
"epoch": 0.76,
"learning_rate": 0.000238235294117647,
"loss": 0.7318,
"step": 430
},
{
"epoch": 0.78,
"learning_rate": 0.00023635794743429286,
"loss": 0.7309,
"step": 440
},
{
"epoch": 0.79,
"learning_rate": 0.00023448060075093865,
"loss": 0.7436,
"step": 450
},
{
"epoch": 0.81,
"learning_rate": 0.00023260325406758447,
"loss": 0.7327,
"step": 460
},
{
"epoch": 0.83,
"learning_rate": 0.00023072590738423026,
"loss": 0.7414,
"step": 470
},
{
"epoch": 0.85,
"learning_rate": 0.00022884856070087608,
"loss": 0.7337,
"step": 480
},
{
"epoch": 0.86,
"learning_rate": 0.00022697121401752188,
"loss": 0.7399,
"step": 490
},
{
"epoch": 0.88,
"learning_rate": 0.00022509386733416767,
"loss": 0.7294,
"step": 500
},
{
"epoch": 0.9,
"learning_rate": 0.00022321652065081352,
"loss": 0.7186,
"step": 510
},
{
"epoch": 0.92,
"learning_rate": 0.0002213391739674593,
"loss": 0.7358,
"step": 520
},
{
"epoch": 0.94,
"learning_rate": 0.00021946182728410513,
"loss": 0.7296,
"step": 530
},
{
"epoch": 0.95,
"learning_rate": 0.00021758448060075092,
"loss": 0.7256,
"step": 540
},
{
"epoch": 0.97,
"learning_rate": 0.00021570713391739672,
"loss": 0.7265,
"step": 550
},
{
"epoch": 0.99,
"learning_rate": 0.00021382978723404254,
"loss": 0.7389,
"step": 560
},
{
"epoch": 1.01,
"learning_rate": 0.00021195244055068833,
"loss": 0.7235,
"step": 570
},
{
"epoch": 1.02,
"learning_rate": 0.00021007509386733418,
"loss": 0.7195,
"step": 580
},
{
"epoch": 1.04,
"learning_rate": 0.00020819774718397997,
"loss": 0.7467,
"step": 590
},
{
"epoch": 1.06,
"learning_rate": 0.00020632040050062576,
"loss": 0.7179,
"step": 600
},
{
"epoch": 1.06,
"eval_loss": 0.7271679639816284,
"eval_runtime": 65.972,
"eval_samples_per_second": 30.316,
"eval_steps_per_second": 0.758,
"step": 600
},
{
"epoch": 1.08,
"learning_rate": 0.00020444305381727158,
"loss": 0.7306,
"step": 610
},
{
"epoch": 1.09,
"learning_rate": 0.00020256570713391738,
"loss": 0.7266,
"step": 620
},
{
"epoch": 1.11,
"learning_rate": 0.00020068836045056317,
"loss": 0.7293,
"step": 630
},
{
"epoch": 1.13,
"learning_rate": 0.000198811013767209,
"loss": 0.7313,
"step": 640
},
{
"epoch": 1.15,
"learning_rate": 0.00019693366708385478,
"loss": 0.7302,
"step": 650
},
{
"epoch": 1.16,
"learning_rate": 0.00019505632040050063,
"loss": 0.7182,
"step": 660
},
{
"epoch": 1.18,
"learning_rate": 0.00019317897371714642,
"loss": 0.7264,
"step": 670
},
{
"epoch": 1.2,
"learning_rate": 0.00019130162703379222,
"loss": 0.7261,
"step": 680
},
{
"epoch": 1.22,
"learning_rate": 0.00018942428035043804,
"loss": 0.7216,
"step": 690
},
{
"epoch": 1.24,
"learning_rate": 0.00018754693366708383,
"loss": 0.7318,
"step": 700
},
{
"epoch": 1.25,
"learning_rate": 0.00018566958698372962,
"loss": 0.7197,
"step": 710
},
{
"epoch": 1.27,
"learning_rate": 0.00018379224030037544,
"loss": 0.7201,
"step": 720
},
{
"epoch": 1.29,
"learning_rate": 0.00018191489361702126,
"loss": 0.7163,
"step": 730
},
{
"epoch": 1.31,
"learning_rate": 0.00018003754693366708,
"loss": 0.7252,
"step": 740
},
{
"epoch": 1.32,
"learning_rate": 0.00017816020025031287,
"loss": 0.7231,
"step": 750
},
{
"epoch": 1.34,
"learning_rate": 0.0001762828535669587,
"loss": 0.7358,
"step": 760
},
{
"epoch": 1.36,
"learning_rate": 0.0001744055068836045,
"loss": 0.7076,
"step": 770
},
{
"epoch": 1.38,
"learning_rate": 0.00017252816020025028,
"loss": 0.7073,
"step": 780
},
{
"epoch": 1.39,
"learning_rate": 0.0001706508135168961,
"loss": 0.721,
"step": 790
},
{
"epoch": 1.41,
"learning_rate": 0.00016877346683354192,
"loss": 0.7209,
"step": 800
},
{
"epoch": 1.41,
"eval_loss": 0.7222920060157776,
"eval_runtime": 66.0169,
"eval_samples_per_second": 30.295,
"eval_steps_per_second": 0.757,
"step": 800
},
{
"epoch": 1.43,
"learning_rate": 0.00016689612015018774,
"loss": 0.7134,
"step": 810
},
{
"epoch": 1.45,
"learning_rate": 0.00016501877346683353,
"loss": 0.7326,
"step": 820
},
{
"epoch": 1.46,
"learning_rate": 0.00016314142678347933,
"loss": 0.7132,
"step": 830
},
{
"epoch": 1.48,
"learning_rate": 0.00016126408010012515,
"loss": 0.7162,
"step": 840
},
{
"epoch": 1.5,
"learning_rate": 0.00015938673341677094,
"loss": 0.7037,
"step": 850
},
{
"epoch": 1.52,
"learning_rate": 0.00015750938673341673,
"loss": 0.7216,
"step": 860
},
{
"epoch": 1.54,
"learning_rate": 0.00015563204005006258,
"loss": 0.7031,
"step": 870
},
{
"epoch": 1.55,
"learning_rate": 0.00015375469336670837,
"loss": 0.7094,
"step": 880
},
{
"epoch": 1.57,
"learning_rate": 0.0001518773466833542,
"loss": 0.7218,
"step": 890
},
{
"epoch": 1.59,
"learning_rate": 0.00015,
"loss": 0.7235,
"step": 900
},
{
"epoch": 1.61,
"learning_rate": 0.00014812265331664578,
"loss": 0.717,
"step": 910
},
{
"epoch": 1.62,
"learning_rate": 0.0001462453066332916,
"loss": 0.7138,
"step": 920
},
{
"epoch": 1.64,
"learning_rate": 0.00014436795994993742,
"loss": 0.718,
"step": 930
},
{
"epoch": 1.66,
"learning_rate": 0.0001424906132665832,
"loss": 0.7191,
"step": 940
},
{
"epoch": 1.68,
"learning_rate": 0.000140613266583229,
"loss": 0.7205,
"step": 950
},
{
"epoch": 1.69,
"learning_rate": 0.00013873591989987483,
"loss": 0.7098,
"step": 960
},
{
"epoch": 1.71,
"learning_rate": 0.00013685857321652065,
"loss": 0.716,
"step": 970
},
{
"epoch": 1.73,
"learning_rate": 0.00013498122653316644,
"loss": 0.7155,
"step": 980
},
{
"epoch": 1.75,
"learning_rate": 0.00013310387984981226,
"loss": 0.7195,
"step": 990
},
{
"epoch": 1.76,
"learning_rate": 0.00013122653316645805,
"loss": 0.7184,
"step": 1000
},
{
"epoch": 1.76,
"eval_loss": 0.71805739402771,
"eval_runtime": 66.049,
"eval_samples_per_second": 30.281,
"eval_steps_per_second": 0.757,
"step": 1000
},
{
"epoch": 1.78,
"learning_rate": 0.00012934918648310387,
"loss": 0.7126,
"step": 1010
},
{
"epoch": 1.8,
"learning_rate": 0.00012747183979974967,
"loss": 0.7149,
"step": 1020
},
{
"epoch": 1.82,
"learning_rate": 0.00012559449311639549,
"loss": 0.7113,
"step": 1030
},
{
"epoch": 1.84,
"learning_rate": 0.00012371714643304128,
"loss": 0.7183,
"step": 1040
},
{
"epoch": 1.85,
"learning_rate": 0.0001218397997496871,
"loss": 0.7021,
"step": 1050
},
{
"epoch": 1.87,
"learning_rate": 0.00011996245306633289,
"loss": 0.7166,
"step": 1060
},
{
"epoch": 1.89,
"learning_rate": 0.00011808510638297871,
"loss": 0.7136,
"step": 1070
},
{
"epoch": 1.91,
"learning_rate": 0.00011620775969962452,
"loss": 0.7282,
"step": 1080
},
{
"epoch": 1.92,
"learning_rate": 0.00011433041301627033,
"loss": 0.7166,
"step": 1090
},
{
"epoch": 1.94,
"learning_rate": 0.00011245306633291615,
"loss": 0.7009,
"step": 1100
},
{
"epoch": 1.96,
"learning_rate": 0.00011057571964956194,
"loss": 0.7108,
"step": 1110
},
{
"epoch": 1.98,
"learning_rate": 0.00010869837296620774,
"loss": 0.7143,
"step": 1120
},
{
"epoch": 1.99,
"learning_rate": 0.00010682102628285355,
"loss": 0.7099,
"step": 1130
},
{
"epoch": 2.01,
"learning_rate": 0.00010494367959949937,
"loss": 0.7099,
"step": 1140
},
{
"epoch": 2.03,
"learning_rate": 0.00010306633291614518,
"loss": 0.7046,
"step": 1150
},
{
"epoch": 2.05,
"learning_rate": 0.00010118898623279097,
"loss": 0.7032,
"step": 1160
},
{
"epoch": 2.06,
"learning_rate": 9.931163954943679e-05,
"loss": 0.6998,
"step": 1170
},
{
"epoch": 2.08,
"learning_rate": 9.74342928660826e-05,
"loss": 0.708,
"step": 1180
},
{
"epoch": 2.1,
"learning_rate": 9.55569461827284e-05,
"loss": 0.7126,
"step": 1190
},
{
"epoch": 2.12,
"learning_rate": 9.36795994993742e-05,
"loss": 0.7059,
"step": 1200
},
{
"epoch": 2.12,
"eval_loss": 0.7155373692512512,
"eval_runtime": 66.0519,
"eval_samples_per_second": 30.279,
"eval_steps_per_second": 0.757,
"step": 1200
},
{
"epoch": 2.14,
"learning_rate": 9.180225281602002e-05,
"loss": 0.7009,
"step": 1210
},
{
"epoch": 2.15,
"learning_rate": 8.992490613266582e-05,
"loss": 0.7056,
"step": 1220
},
{
"epoch": 2.17,
"learning_rate": 8.804755944931163e-05,
"loss": 0.7053,
"step": 1230
},
{
"epoch": 2.19,
"learning_rate": 8.617021276595745e-05,
"loss": 0.7103,
"step": 1240
},
{
"epoch": 2.21,
"learning_rate": 8.429286608260324e-05,
"loss": 0.7141,
"step": 1250
},
{
"epoch": 2.22,
"learning_rate": 8.241551939924905e-05,
"loss": 0.7003,
"step": 1260
},
{
"epoch": 2.24,
"learning_rate": 8.053817271589486e-05,
"loss": 0.7046,
"step": 1270
},
{
"epoch": 2.26,
"learning_rate": 7.866082603254068e-05,
"loss": 0.7036,
"step": 1280
},
{
"epoch": 2.28,
"learning_rate": 7.678347934918648e-05,
"loss": 0.713,
"step": 1290
},
{
"epoch": 2.29,
"learning_rate": 7.490613266583228e-05,
"loss": 0.6905,
"step": 1300
},
{
"epoch": 2.31,
"learning_rate": 7.30287859824781e-05,
"loss": 0.7153,
"step": 1310
},
{
"epoch": 2.33,
"learning_rate": 7.11514392991239e-05,
"loss": 0.6982,
"step": 1320
},
{
"epoch": 2.35,
"learning_rate": 6.927409261576971e-05,
"loss": 0.7114,
"step": 1330
},
{
"epoch": 2.36,
"learning_rate": 6.739674593241552e-05,
"loss": 0.7029,
"step": 1340
},
{
"epoch": 2.38,
"learning_rate": 6.551939924906132e-05,
"loss": 0.706,
"step": 1350
},
{
"epoch": 2.4,
"learning_rate": 6.364205256570713e-05,
"loss": 0.7164,
"step": 1360
},
{
"epoch": 2.42,
"learning_rate": 6.176470588235294e-05,
"loss": 0.7028,
"step": 1370
},
{
"epoch": 2.44,
"learning_rate": 5.988735919899874e-05,
"loss": 0.7016,
"step": 1380
},
{
"epoch": 2.45,
"learning_rate": 5.801001251564455e-05,
"loss": 0.7032,
"step": 1390
},
{
"epoch": 2.47,
"learning_rate": 5.6132665832290355e-05,
"loss": 0.7126,
"step": 1400
},
{
"epoch": 2.47,
"eval_loss": 0.7135885953903198,
"eval_runtime": 66.3918,
"eval_samples_per_second": 30.124,
"eval_steps_per_second": 0.753,
"step": 1400
},
{
"epoch": 2.49,
"learning_rate": 5.425531914893617e-05,
"loss": 0.7115,
"step": 1410
},
{
"epoch": 2.51,
"learning_rate": 5.237797246558197e-05,
"loss": 0.7042,
"step": 1420
},
{
"epoch": 2.52,
"learning_rate": 5.050062578222778e-05,
"loss": 0.7121,
"step": 1430
},
{
"epoch": 2.54,
"learning_rate": 4.862327909887359e-05,
"loss": 0.7071,
"step": 1440
},
{
"epoch": 2.56,
"learning_rate": 4.6745932415519395e-05,
"loss": 0.6955,
"step": 1450
},
{
"epoch": 2.58,
"learning_rate": 4.48685857321652e-05,
"loss": 0.6995,
"step": 1460
},
{
"epoch": 2.59,
"learning_rate": 4.299123904881101e-05,
"loss": 0.7036,
"step": 1470
},
{
"epoch": 2.61,
"learning_rate": 4.111389236545682e-05,
"loss": 0.6904,
"step": 1480
},
{
"epoch": 2.63,
"learning_rate": 3.923654568210262e-05,
"loss": 0.7043,
"step": 1490
},
{
"epoch": 2.65,
"learning_rate": 3.735919899874843e-05,
"loss": 0.7077,
"step": 1500
},
{
"epoch": 2.66,
"learning_rate": 3.548185231539424e-05,
"loss": 0.709,
"step": 1510
},
{
"epoch": 2.68,
"learning_rate": 3.360450563204005e-05,
"loss": 0.7071,
"step": 1520
},
{
"epoch": 2.7,
"learning_rate": 3.1727158948685854e-05,
"loss": 0.6952,
"step": 1530
},
{
"epoch": 2.72,
"learning_rate": 2.9849812265331664e-05,
"loss": 0.6965,
"step": 1540
},
{
"epoch": 2.74,
"learning_rate": 2.797246558197747e-05,
"loss": 0.6973,
"step": 1550
},
{
"epoch": 2.75,
"learning_rate": 2.6095118898623277e-05,
"loss": 0.7217,
"step": 1560
},
{
"epoch": 2.77,
"learning_rate": 2.4217772215269084e-05,
"loss": 0.6943,
"step": 1570
},
{
"epoch": 2.79,
"learning_rate": 2.234042553191489e-05,
"loss": 0.7119,
"step": 1580
},
{
"epoch": 2.81,
"learning_rate": 2.0463078848560697e-05,
"loss": 0.7043,
"step": 1590
},
{
"epoch": 2.82,
"learning_rate": 1.8585732165206507e-05,
"loss": 0.6987,
"step": 1600
},
{
"epoch": 2.82,
"eval_loss": 0.7122384905815125,
"eval_runtime": 66.3755,
"eval_samples_per_second": 30.132,
"eval_steps_per_second": 0.753,
"step": 1600
}
],
"max_steps": 1698,
"num_train_epochs": 3,
"total_flos": 7.4070025488732e+18,
"trial_name": null,
"trial_params": null
}
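
For reference, a minimal sketch of how the log_history entries above can be parsed and the train/eval loss curves plotted. It assumes only that this file is saved as trainer_state.json in the working directory and that matplotlib is installed; neither is stated in the file itself, so adjust the path to your checkpoint as needed.

import json
import matplotlib.pyplot as plt

# Hypothetical path; point this at the checkpoint's trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss" (every 200 steps here).
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.title(f"best eval loss: {state['best_metric']:.4f}")
plt.legend()
plt.show()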