llmTechChat-lora / checkpoint-160 / trainer_state.json
{
"best_metric": 1.911091685295105,
"best_model_checkpoint": "./llmTechChat-lora/checkpoint-160",
"epoch": 0.9976617303195635,
"eval_steps": 40,
"global_step": 160,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2.9999999999999997e-05,
"loss": 4.3577,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 4.326064109802246,
"eval_runtime": 288.7431,
"eval_samples_per_second": 206.796,
"eval_steps_per_second": 206.796,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 5.9999999999999995e-05,
"loss": 4.2951,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 8.999999999999999e-05,
"loss": 3.9156,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 0.00011999999999999999,
"loss": 3.4836,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 0.00015,
"loss": 3.1743,
"step": 5
},
{
"epoch": 0.04,
"learning_rate": 0.00017999999999999998,
"loss": 2.8242,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 0.00020999999999999998,
"loss": 2.7478,
"step": 7
},
{
"epoch": 0.05,
"learning_rate": 0.00023999999999999998,
"loss": 2.7198,
"step": 8
},
{
"epoch": 0.06,
"learning_rate": 0.00027,
"loss": 2.6025,
"step": 9
},
{
"epoch": 0.06,
"learning_rate": 0.0003,
"loss": 2.5337,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 0.00029999813499925374,
"loss": 2.5019,
"step": 11
},
{
"epoch": 0.07,
"learning_rate": 0.0002999925400433914,
"loss": 2.3393,
"step": 12
},
{
"epoch": 0.08,
"learning_rate": 0.00029998321527154097,
"loss": 2.3796,
"step": 13
},
{
"epoch": 0.09,
"learning_rate": 0.0002999701609155785,
"loss": 2.3054,
"step": 14
},
{
"epoch": 0.09,
"learning_rate": 0.0002999533773001224,
"loss": 2.2898,
"step": 15
},
{
"epoch": 0.1,
"learning_rate": 0.00029993286484252544,
"loss": 2.2254,
"step": 16
},
{
"epoch": 0.11,
"learning_rate": 0.00029990862405286433,
"loss": 2.2197,
"step": 17
},
{
"epoch": 0.11,
"learning_rate": 0.0002998806555339269,
"loss": 2.1797,
"step": 18
},
{
"epoch": 0.12,
"learning_rate": 0.0002998489599811972,
"loss": 2.1854,
"step": 19
},
{
"epoch": 0.12,
"learning_rate": 0.0002998135381828383,
"loss": 2.2105,
"step": 20
},
{
"epoch": 0.13,
"learning_rate": 0.00029977439101967274,
"loss": 2.1866,
"step": 21
},
{
"epoch": 0.14,
"learning_rate": 0.00029973151946516025,
"loss": 2.1718,
"step": 22
},
{
"epoch": 0.14,
"learning_rate": 0.0002996849245853739,
"loss": 2.1158,
"step": 23
},
{
"epoch": 0.15,
"learning_rate": 0.0002996346075389736,
"loss": 2.1495,
"step": 24
},
{
"epoch": 0.16,
"learning_rate": 0.00029958056957717696,
"loss": 2.1326,
"step": 25
},
{
"epoch": 0.16,
"learning_rate": 0.00029952281204372863,
"loss": 2.1391,
"step": 26
},
{
"epoch": 0.17,
"learning_rate": 0.0002994613363748664,
"loss": 2.1039,
"step": 27
},
{
"epoch": 0.17,
"learning_rate": 0.00029939614409928584,
"loss": 2.132,
"step": 28
},
{
"epoch": 0.18,
"learning_rate": 0.00029932723683810225,
"loss": 2.1278,
"step": 29
},
{
"epoch": 0.19,
"learning_rate": 0.0002992546163048102,
"loss": 2.0698,
"step": 30
},
{
"epoch": 0.19,
"learning_rate": 0.00029917828430524096,
"loss": 2.0757,
"step": 31
},
{
"epoch": 0.2,
"learning_rate": 0.0002990982427375177,
"loss": 2.0689,
"step": 32
},
{
"epoch": 0.21,
"learning_rate": 0.0002990144935920083,
"loss": 2.0986,
"step": 33
},
{
"epoch": 0.21,
"learning_rate": 0.0002989270389512756,
"loss": 2.058,
"step": 34
},
{
"epoch": 0.22,
"learning_rate": 0.0002988358809900258,
"loss": 2.0451,
"step": 35
},
{
"epoch": 0.22,
"learning_rate": 0.00029874102197505447,
"loss": 2.0613,
"step": 36
},
{
"epoch": 0.23,
"learning_rate": 0.0002986424642651902,
"loss": 2.0796,
"step": 37
},
{
"epoch": 0.24,
"learning_rate": 0.0002985402103112355,
"loss": 2.086,
"step": 38
},
{
"epoch": 0.24,
"learning_rate": 0.00029843426265590656,
"loss": 2.0275,
"step": 39
},
{
"epoch": 0.25,
"learning_rate": 0.0002983246239337692,
"loss": 2.0615,
"step": 40
},
{
"epoch": 0.25,
"eval_loss": 2.0476396083831787,
"eval_runtime": 289.9408,
"eval_samples_per_second": 205.942,
"eval_steps_per_second": 205.942,
"step": 40
},
{
"epoch": 0.26,
"learning_rate": 0.0002982112968711744,
"loss": 2.1012,
"step": 41
},
{
"epoch": 0.26,
"learning_rate": 0.0002980942842861893,
"loss": 2.0537,
"step": 42
},
{
"epoch": 0.27,
"learning_rate": 0.00029797358908852816,
"loss": 2.0595,
"step": 43
},
{
"epoch": 0.27,
"learning_rate": 0.00029784921427947946,
"loss": 2.0409,
"step": 44
},
{
"epoch": 0.28,
"learning_rate": 0.0002977211629518312,
"loss": 2.0045,
"step": 45
},
{
"epoch": 0.29,
"learning_rate": 0.00029758943828979444,
"loss": 2.0176,
"step": 46
},
{
"epoch": 0.29,
"learning_rate": 0.0002974540435689237,
"loss": 2.0189,
"step": 47
},
{
"epoch": 0.3,
"learning_rate": 0.0002973149821560358,
"loss": 2.0169,
"step": 48
},
{
"epoch": 0.31,
"learning_rate": 0.00029717225750912585,
"loss": 2.0553,
"step": 49
},
{
"epoch": 0.31,
"learning_rate": 0.00029702587317728153,
"loss": 2.0569,
"step": 50
},
{
"epoch": 0.32,
"learning_rate": 0.0002968758328005947,
"loss": 2.0522,
"step": 51
},
{
"epoch": 0.32,
"learning_rate": 0.0002967221401100708,
"loss": 2.0285,
"step": 52
},
{
"epoch": 0.33,
"learning_rate": 0.00029656479892753635,
"loss": 2.0266,
"step": 53
},
{
"epoch": 0.34,
"learning_rate": 0.0002964038131655436,
"loss": 2.0161,
"step": 54
},
{
"epoch": 0.34,
"learning_rate": 0.0002962391868272735,
"loss": 2.0122,
"step": 55
},
{
"epoch": 0.35,
"learning_rate": 0.00029607092400643593,
"loss": 1.9926,
"step": 56
},
{
"epoch": 0.36,
"learning_rate": 0.000295899028887168,
"loss": 2.0123,
"step": 57
},
{
"epoch": 0.36,
"learning_rate": 0.0002957235057439301,
"loss": 2.0121,
"step": 58
},
{
"epoch": 0.37,
"learning_rate": 0.0002955443589413994,
"loss": 2.0245,
"step": 59
},
{
"epoch": 0.37,
"learning_rate": 0.00029536159293436166,
"loss": 2.0127,
"step": 60
},
{
"epoch": 0.38,
"learning_rate": 0.0002951752122676,
"loss": 2.0057,
"step": 61
},
{
"epoch": 0.39,
"learning_rate": 0.000294985221575782,
"loss": 2.0226,
"step": 62
},
{
"epoch": 0.39,
"learning_rate": 0.0002947916255833451,
"loss": 2.0032,
"step": 63
},
{
"epoch": 0.4,
"learning_rate": 0.00029459442910437797,
"loss": 2.045,
"step": 64
},
{
"epoch": 0.41,
"learning_rate": 0.00029439363704250176,
"loss": 1.9794,
"step": 65
},
{
"epoch": 0.41,
"learning_rate": 0.0002941892543907478,
"loss": 2.0009,
"step": 66
},
{
"epoch": 0.42,
"learning_rate": 0.0002939812862314333,
"loss": 1.9508,
"step": 67
},
{
"epoch": 0.42,
"learning_rate": 0.00029376973773603533,
"loss": 1.9913,
"step": 68
},
{
"epoch": 0.43,
"learning_rate": 0.0002935546141650618,
"loss": 1.9762,
"step": 69
},
{
"epoch": 0.44,
"learning_rate": 0.00029333592086792107,
"loss": 2.0,
"step": 70
},
{
"epoch": 0.44,
"learning_rate": 0.0002931136632827886,
"loss": 1.9629,
"step": 71
},
{
"epoch": 0.45,
"learning_rate": 0.0002928878469364719,
"loss": 2.0009,
"step": 72
},
{
"epoch": 0.46,
"learning_rate": 0.00029265847744427303,
"loss": 1.9714,
"step": 73
},
{
"epoch": 0.46,
"learning_rate": 0.0002924255605098489,
"loss": 1.9474,
"step": 74
},
{
"epoch": 0.47,
"learning_rate": 0.0002921891019250697,
"loss": 1.9959,
"step": 75
},
{
"epoch": 0.47,
"learning_rate": 0.0002919491075698746,
"loss": 1.9846,
"step": 76
},
{
"epoch": 0.48,
"learning_rate": 0.00029170558341212554,
"loss": 1.9978,
"step": 77
},
{
"epoch": 0.49,
"learning_rate": 0.00029145853550745904,
"loss": 1.9527,
"step": 78
},
{
"epoch": 0.49,
"learning_rate": 0.00029120796999913546,
"loss": 1.9585,
"step": 79
},
{
"epoch": 0.5,
"learning_rate": 0.0002909538931178862,
"loss": 1.9905,
"step": 80
},
{
"epoch": 0.5,
"eval_loss": 1.96906578540802,
"eval_runtime": 291.8459,
"eval_samples_per_second": 204.598,
"eval_steps_per_second": 204.598,
"step": 80
},
{
"epoch": 0.51,
"learning_rate": 0.00029069631118175903,
"loss": 1.9926,
"step": 81
},
{
"epoch": 0.51,
"learning_rate": 0.00029043523059596053,
"loss": 1.9916,
"step": 82
},
{
"epoch": 0.52,
"learning_rate": 0.0002901706578526973,
"loss": 1.9545,
"step": 83
},
{
"epoch": 0.52,
"learning_rate": 0.0002899025995310141,
"loss": 1.9399,
"step": 84
},
{
"epoch": 0.53,
"learning_rate": 0.00028963106229663063,
"loss": 1.9515,
"step": 85
},
{
"epoch": 0.54,
"learning_rate": 0.00028935605290177535,
"loss": 1.9855,
"step": 86
},
{
"epoch": 0.54,
"learning_rate": 0.0002890775781850181,
"loss": 2.0159,
"step": 87
},
{
"epoch": 0.55,
"learning_rate": 0.00028879564507109946,
"loss": 1.9885,
"step": 88
},
{
"epoch": 0.55,
"learning_rate": 0.00028851026057075916,
"loss": 1.9625,
"step": 89
},
{
"epoch": 0.56,
"learning_rate": 0.00028822143178056114,
"loss": 1.9161,
"step": 90
},
{
"epoch": 0.57,
"learning_rate": 0.0002879291658827176,
"loss": 1.9141,
"step": 91
},
{
"epoch": 0.57,
"learning_rate": 0.00028763347014491,
"loss": 1.9867,
"step": 92
},
{
"epoch": 0.58,
"learning_rate": 0.00028733435192010887,
"loss": 1.9325,
"step": 93
},
{
"epoch": 0.59,
"learning_rate": 0.0002870318186463901,
"loss": 1.9517,
"step": 94
},
{
"epoch": 0.59,
"learning_rate": 0.00028672587784675096,
"loss": 1.9435,
"step": 95
},
{
"epoch": 0.6,
"learning_rate": 0.0002864165371289223,
"loss": 1.9428,
"step": 96
},
{
"epoch": 0.6,
"learning_rate": 0.0002861038041851797,
"loss": 1.9182,
"step": 97
},
{
"epoch": 0.61,
"learning_rate": 0.0002857876867921522,
"loss": 1.9344,
"step": 98
},
{
"epoch": 0.62,
"learning_rate": 0.0002854681928106287,
"loss": 1.9652,
"step": 99
},
{
"epoch": 0.62,
"learning_rate": 0.0002851453301853628,
"loss": 1.9332,
"step": 100
},
{
"epoch": 0.63,
"learning_rate": 0.000284819106944875,
"loss": 1.9042,
"step": 101
},
{
"epoch": 0.64,
"learning_rate": 0.0002844895312012531,
"loss": 1.9571,
"step": 102
},
{
"epoch": 0.64,
"learning_rate": 0.0002841566111499505,
"loss": 1.9129,
"step": 103
},
{
"epoch": 0.65,
"learning_rate": 0.0002838203550695825,
"loss": 1.9347,
"step": 104
},
{
"epoch": 0.65,
"learning_rate": 0.00028348077132172027,
"loss": 1.9461,
"step": 105
},
{
"epoch": 0.66,
"learning_rate": 0.0002831378683506831,
"loss": 1.9188,
"step": 106
},
{
"epoch": 0.67,
"learning_rate": 0.00028279165468332823,
"loss": 1.9491,
"step": 107
},
{
"epoch": 0.67,
"learning_rate": 0.000282442138928839,
"loss": 1.961,
"step": 108
},
{
"epoch": 0.68,
"learning_rate": 0.00028208932977851067,
"loss": 1.9048,
"step": 109
},
{
"epoch": 0.69,
"learning_rate": 0.0002817332360055343,
"loss": 1.9493,
"step": 110
},
{
"epoch": 0.69,
"learning_rate": 0.0002813738664647784,
"loss": 1.9685,
"step": 111
},
{
"epoch": 0.7,
"learning_rate": 0.00028101123009256947,
"loss": 1.9054,
"step": 112
},
{
"epoch": 0.7,
"learning_rate": 0.0002806453359064686,
"loss": 1.9317,
"step": 113
},
{
"epoch": 0.71,
"learning_rate": 0.00028027619300504834,
"loss": 1.9701,
"step": 114
},
{
"epoch": 0.72,
"learning_rate": 0.0002799038105676658,
"loss": 1.9426,
"step": 115
},
{
"epoch": 0.72,
"learning_rate": 0.0002795281978542346,
"loss": 1.957,
"step": 116
},
{
"epoch": 0.73,
"learning_rate": 0.0002791493642049947,
"loss": 1.9535,
"step": 117
},
{
"epoch": 0.74,
"learning_rate": 0.0002787673190402799,
"loss": 1.9045,
"step": 118
},
{
"epoch": 0.74,
"learning_rate": 0.00027838207186028376,
"loss": 1.9575,
"step": 119
},
{
"epoch": 0.75,
"learning_rate": 0.0002779936322448233,
"loss": 1.8699,
"step": 120
},
{
"epoch": 0.75,
"eval_loss": 1.9343771934509277,
"eval_runtime": 292.8194,
"eval_samples_per_second": 203.917,
"eval_steps_per_second": 203.917,
"step": 120
},
{
"epoch": 0.75,
"learning_rate": 0.0002776020098531009,
"loss": 1.956,
"step": 121
},
{
"epoch": 0.76,
"learning_rate": 0.00027720721442346387,
"loss": 1.8958,
"step": 122
},
{
"epoch": 0.77,
"learning_rate": 0.0002768092557731625,
"loss": 1.9157,
"step": 123
},
{
"epoch": 0.77,
"learning_rate": 0.00027640814379810587,
"loss": 1.9118,
"step": 124
},
{
"epoch": 0.78,
"learning_rate": 0.0002760038884726157,
"loss": 1.9707,
"step": 125
},
{
"epoch": 0.79,
"learning_rate": 0.0002755964998491785,
"loss": 1.9563,
"step": 126
},
{
"epoch": 0.79,
"learning_rate": 0.0002751859880581954,
"loss": 1.9825,
"step": 127
},
{
"epoch": 0.8,
"learning_rate": 0.0002747723633077303,
"loss": 1.9687,
"step": 128
},
{
"epoch": 0.8,
"learning_rate": 0.0002743556358832562,
"loss": 1.9378,
"step": 129
},
{
"epoch": 0.81,
"learning_rate": 0.00027393581614739923,
"loss": 1.9307,
"step": 130
},
{
"epoch": 0.82,
"learning_rate": 0.00027351291453968086,
"loss": 1.9333,
"step": 131
},
{
"epoch": 0.82,
"learning_rate": 0.0002730869415762587,
"loss": 1.9229,
"step": 132
},
{
"epoch": 0.83,
"learning_rate": 0.0002726579078496647,
"loss": 1.911,
"step": 133
},
{
"epoch": 0.84,
"learning_rate": 0.00027222582402854176,
"loss": 1.9556,
"step": 134
},
{
"epoch": 0.84,
"learning_rate": 0.0002717907008573785,
"loss": 1.9008,
"step": 135
},
{
"epoch": 0.85,
"learning_rate": 0.0002713525491562421,
"loss": 1.9651,
"step": 136
},
{
"epoch": 0.85,
"learning_rate": 0.0002709113798205093,
"loss": 1.9337,
"step": 137
},
{
"epoch": 0.86,
"learning_rate": 0.00027046720382059526,
"loss": 1.9485,
"step": 138
},
{
"epoch": 0.87,
"learning_rate": 0.00027002003220168093,
"loss": 1.8647,
"step": 139
},
{
"epoch": 0.87,
"learning_rate": 0.0002695698760834384,
"loss": 1.9288,
"step": 140
},
{
"epoch": 0.88,
"learning_rate": 0.00026911674665975417,
"loss": 1.9535,
"step": 141
},
{
"epoch": 0.89,
"learning_rate": 0.0002686606551984512,
"loss": 1.932,
"step": 142
},
{
"epoch": 0.89,
"learning_rate": 0.00026820161304100823,
"loss": 1.9516,
"step": 143
},
{
"epoch": 0.9,
"learning_rate": 0.0002677396316022783,
"loss": 1.9347,
"step": 144
},
{
"epoch": 0.9,
"learning_rate": 0.00026727472237020447,
"loss": 1.9473,
"step": 145
},
{
"epoch": 0.91,
"learning_rate": 0.0002668068969055341,
"loss": 1.9428,
"step": 146
},
{
"epoch": 0.92,
"learning_rate": 0.0002663361668415318,
"loss": 1.9204,
"step": 147
},
{
"epoch": 0.92,
"learning_rate": 0.0002658625438836899,
"loss": 1.9039,
"step": 148
},
{
"epoch": 0.93,
"learning_rate": 0.0002653860398094373,
"loss": 1.9166,
"step": 149
},
{
"epoch": 0.94,
"learning_rate": 0.00026490666646784665,
"loss": 1.9072,
"step": 150
},
{
"epoch": 0.94,
"learning_rate": 0.00026442443577933994,
"loss": 1.9014,
"step": 151
},
{
"epoch": 0.95,
"learning_rate": 0.0002639393597353917,
"loss": 1.9272,
"step": 152
},
{
"epoch": 0.95,
"learning_rate": 0.00026345145039823097,
"loss": 1.9274,
"step": 153
},
{
"epoch": 0.96,
"learning_rate": 0.00026296071990054165,
"loss": 1.9548,
"step": 154
},
{
"epoch": 0.97,
"learning_rate": 0.0002624671804451601,
"loss": 1.928,
"step": 155
},
{
"epoch": 0.97,
"learning_rate": 0.0002619708443047725,
"loss": 1.9072,
"step": 156
},
{
"epoch": 0.98,
"learning_rate": 0.00026147172382160914,
"loss": 1.9116,
"step": 157
},
{
"epoch": 0.99,
"learning_rate": 0.0002609698314071376,
"loss": 1.915,
"step": 158
},
{
"epoch": 0.99,
"learning_rate": 0.0002604651795417543,
"loss": 1.915,
"step": 159
},
{
"epoch": 1.0,
"learning_rate": 0.0002599577807744739,
"loss": 1.9604,
"step": 160
},
{
"epoch": 1.0,
"eval_loss": 1.911091685295105,
"eval_runtime": 289.5613,
"eval_samples_per_second": 206.212,
"eval_steps_per_second": 206.212,
"step": 160
}
],
"logging_steps": 1,
"max_steps": 640,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 160,
"total_flos": 4.6847467450269696e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}