{
"best_metric": 1.7295050621032715,
"best_model_checkpoint": "./llama3-itihasa/checkpoint-4000",
"epoch": 1.9474196689386563,
"eval_steps": 500,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.024342745861733205,
"grad_norm": 0.798042893409729,
"learning_rate": 0.00012165450121654502,
"loss": 2.421,
"step": 50
},
{
"epoch": 0.04868549172346641,
"grad_norm": 0.6517059803009033,
"learning_rate": 0.00024330900243309004,
"loss": 2.0325,
"step": 100
},
{
"epoch": 0.0730282375851996,
"grad_norm": 1.047722339630127,
"learning_rate": 0.00036496350364963507,
"loss": 2.0045,
"step": 150
},
{
"epoch": 0.09737098344693282,
"grad_norm": 0.724214494228363,
"learning_rate": 0.00048661800486618007,
"loss": 1.9665,
"step": 200
},
{
"epoch": 0.12171372930866602,
"grad_norm": 0.7182360887527466,
"learning_rate": 0.0006082725060827251,
"loss": 1.9531,
"step": 250
},
{
"epoch": 0.1460564751703992,
"grad_norm": 0.741189181804657,
"learning_rate": 0.0007299270072992701,
"loss": 1.9566,
"step": 300
},
{
"epoch": 0.17039922103213243,
"grad_norm": 0.7193257212638855,
"learning_rate": 0.0008515815085158151,
"loss": 1.9536,
"step": 350
},
{
"epoch": 0.19474196689386564,
"grad_norm": 0.7618579864501953,
"learning_rate": 0.0009732360097323601,
"loss": 1.9642,
"step": 400
},
{
"epoch": 0.21908471275559882,
"grad_norm": 0.8242681622505188,
"learning_rate": 0.0009894509061401135,
"loss": 1.9899,
"step": 450
},
{
"epoch": 0.24342745861733203,
"grad_norm": 1.0397591590881348,
"learning_rate": 0.0009759264268325669,
"loss": 1.9819,
"step": 500
},
{
"epoch": 0.24342745861733203,
"eval_loss": 1.9741202592849731,
"eval_runtime": 452.9914,
"eval_samples_per_second": 3.022,
"eval_steps_per_second": 0.757,
"step": 500
},
{
"epoch": 0.26777020447906524,
"grad_norm": 0.904543936252594,
"learning_rate": 0.0009624019475250203,
"loss": 1.9513,
"step": 550
},
{
"epoch": 0.2921129503407984,
"grad_norm": 0.8555233478546143,
"learning_rate": 0.0009488774682174737,
"loss": 1.9543,
"step": 600
},
{
"epoch": 0.31645569620253167,
"grad_norm": 0.7415252923965454,
"learning_rate": 0.000935352988909927,
"loss": 1.9621,
"step": 650
},
{
"epoch": 0.34079844206426485,
"grad_norm": 0.7195931077003479,
"learning_rate": 0.0009218285096023803,
"loss": 1.9414,
"step": 700
},
{
"epoch": 0.36514118792599803,
"grad_norm": 0.9943011999130249,
"learning_rate": 0.0009083040302948337,
"loss": 1.9594,
"step": 750
},
{
"epoch": 0.3894839337877313,
"grad_norm": 0.7514945268630981,
"learning_rate": 0.000894779550987287,
"loss": 1.9278,
"step": 800
},
{
"epoch": 0.41382667964946446,
"grad_norm": 0.7884072661399841,
"learning_rate": 0.0008812550716797404,
"loss": 1.9345,
"step": 850
},
{
"epoch": 0.43816942551119764,
"grad_norm": 0.7612814903259277,
"learning_rate": 0.0008677305923721937,
"loss": 1.9081,
"step": 900
},
{
"epoch": 0.4625121713729309,
"grad_norm": 0.7805455327033997,
"learning_rate": 0.000854206113064647,
"loss": 1.9188,
"step": 950
},
{
"epoch": 0.48685491723466406,
"grad_norm": 0.7169475555419922,
"learning_rate": 0.0008406816337571004,
"loss": 1.8903,
"step": 1000
},
{
"epoch": 0.48685491723466406,
"eval_loss": 1.919243335723877,
"eval_runtime": 452.9281,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 1000
},
{
"epoch": 0.5111976630963972,
"grad_norm": 0.7700228095054626,
"learning_rate": 0.0008271571544495538,
"loss": 1.8996,
"step": 1050
},
{
"epoch": 0.5355404089581305,
"grad_norm": 0.8165948390960693,
"learning_rate": 0.000813632675142007,
"loss": 1.9178,
"step": 1100
},
{
"epoch": 0.5598831548198637,
"grad_norm": 0.6771554350852966,
"learning_rate": 0.0008001081958344604,
"loss": 1.9012,
"step": 1150
},
{
"epoch": 0.5842259006815969,
"grad_norm": 0.7270589470863342,
"learning_rate": 0.0007865837165269137,
"loss": 1.8971,
"step": 1200
},
{
"epoch": 0.6085686465433301,
"grad_norm": 0.7590579390525818,
"learning_rate": 0.0007730592372193671,
"loss": 1.8842,
"step": 1250
},
{
"epoch": 0.6329113924050633,
"grad_norm": 0.642905592918396,
"learning_rate": 0.0007595347579118204,
"loss": 1.8661,
"step": 1300
},
{
"epoch": 0.6572541382667965,
"grad_norm": 0.7350940704345703,
"learning_rate": 0.0007460102786042737,
"loss": 1.8792,
"step": 1350
},
{
"epoch": 0.6815968841285297,
"grad_norm": 0.7721772193908691,
"learning_rate": 0.0007324857992967271,
"loss": 1.8627,
"step": 1400
},
{
"epoch": 0.7059396299902629,
"grad_norm": 0.7251712083816528,
"learning_rate": 0.0007189613199891805,
"loss": 1.8715,
"step": 1450
},
{
"epoch": 0.7302823758519961,
"grad_norm": 0.6874309778213501,
"learning_rate": 0.0007054368406816337,
"loss": 1.8491,
"step": 1500
},
{
"epoch": 0.7302823758519961,
"eval_loss": 1.8761951923370361,
"eval_runtime": 452.8961,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 1500
},
{
"epoch": 0.7546251217137293,
"grad_norm": 0.7240548729896545,
"learning_rate": 0.000691912361374087,
"loss": 1.8585,
"step": 1550
},
{
"epoch": 0.7789678675754625,
"grad_norm": 0.6931944489479065,
"learning_rate": 0.0006783878820665405,
"loss": 1.8623,
"step": 1600
},
{
"epoch": 0.8033106134371957,
"grad_norm": 0.710396409034729,
"learning_rate": 0.0006648634027589938,
"loss": 1.8467,
"step": 1650
},
{
"epoch": 0.8276533592989289,
"grad_norm": 0.7441169619560242,
"learning_rate": 0.0006513389234514471,
"loss": 1.857,
"step": 1700
},
{
"epoch": 0.8519961051606622,
"grad_norm": 0.752155065536499,
"learning_rate": 0.0006378144441439005,
"loss": 1.8404,
"step": 1750
},
{
"epoch": 0.8763388510223953,
"grad_norm": 0.6920920014381409,
"learning_rate": 0.0006242899648363538,
"loss": 1.8331,
"step": 1800
},
{
"epoch": 0.9006815968841285,
"grad_norm": 0.6669381260871887,
"learning_rate": 0.0006107654855288071,
"loss": 1.8466,
"step": 1850
},
{
"epoch": 0.9250243427458618,
"grad_norm": 0.6877081394195557,
"learning_rate": 0.0005972410062212606,
"loss": 1.829,
"step": 1900
},
{
"epoch": 0.9493670886075949,
"grad_norm": 0.7491869926452637,
"learning_rate": 0.0005837165269137138,
"loss": 1.8331,
"step": 1950
},
{
"epoch": 0.9737098344693281,
"grad_norm": 0.7513737082481384,
"learning_rate": 0.0005701920476061671,
"loss": 1.8106,
"step": 2000
},
{
"epoch": 0.9737098344693281,
"eval_loss": 1.8369135856628418,
"eval_runtime": 452.8951,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 2000
},
{
"epoch": 0.9980525803310614,
"grad_norm": 0.7290380001068115,
"learning_rate": 0.0005566675682986206,
"loss": 1.8111,
"step": 2050
},
{
"epoch": 1.0223953261927945,
"grad_norm": 0.6029945611953735,
"learning_rate": 0.0005431430889910739,
"loss": 1.7597,
"step": 2100
},
{
"epoch": 1.0467380720545278,
"grad_norm": 0.6136703491210938,
"learning_rate": 0.0005296186096835271,
"loss": 1.7358,
"step": 2150
},
{
"epoch": 1.071080817916261,
"grad_norm": 0.6262673139572144,
"learning_rate": 0.0005160941303759806,
"loss": 1.742,
"step": 2200
},
{
"epoch": 1.095423563777994,
"grad_norm": 0.7081790566444397,
"learning_rate": 0.0005025696510684339,
"loss": 1.7594,
"step": 2250
},
{
"epoch": 1.1197663096397275,
"grad_norm": 0.6639809608459473,
"learning_rate": 0.0004890451717608872,
"loss": 1.7558,
"step": 2300
},
{
"epoch": 1.1441090555014606,
"grad_norm": 0.7874799370765686,
"learning_rate": 0.00047552069245334054,
"loss": 1.7365,
"step": 2350
},
{
"epoch": 1.1684518013631937,
"grad_norm": 0.6873753666877747,
"learning_rate": 0.0004619962131457939,
"loss": 1.7391,
"step": 2400
},
{
"epoch": 1.192794547224927,
"grad_norm": 0.7038766741752625,
"learning_rate": 0.00044847173383824726,
"loss": 1.7223,
"step": 2450
},
{
"epoch": 1.2171372930866602,
"grad_norm": 0.719927191734314,
"learning_rate": 0.00043494725453070053,
"loss": 1.7183,
"step": 2500
},
{
"epoch": 1.2171372930866602,
"eval_loss": 1.80846107006073,
"eval_runtime": 452.923,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 2500
},
{
"epoch": 1.2414800389483933,
"grad_norm": 0.62213134765625,
"learning_rate": 0.0004214227752231539,
"loss": 1.7484,
"step": 2550
},
{
"epoch": 1.2658227848101267,
"grad_norm": 0.6441882252693176,
"learning_rate": 0.00040789829591560725,
"loss": 1.7351,
"step": 2600
},
{
"epoch": 1.2901655306718598,
"grad_norm": 0.6303529739379883,
"learning_rate": 0.0003943738166080606,
"loss": 1.7175,
"step": 2650
},
{
"epoch": 1.314508276533593,
"grad_norm": 0.6477542519569397,
"learning_rate": 0.00038084933730051396,
"loss": 1.7431,
"step": 2700
},
{
"epoch": 1.3388510223953263,
"grad_norm": 0.6949708461761475,
"learning_rate": 0.0003673248579929673,
"loss": 1.7145,
"step": 2750
},
{
"epoch": 1.3631937682570594,
"grad_norm": 0.681567907333374,
"learning_rate": 0.0003538003786854206,
"loss": 1.7041,
"step": 2800
},
{
"epoch": 1.3875365141187925,
"grad_norm": 0.6230831146240234,
"learning_rate": 0.00034027589937787395,
"loss": 1.7028,
"step": 2850
},
{
"epoch": 1.4118792599805259,
"grad_norm": 0.6681589484214783,
"learning_rate": 0.00032675142007032733,
"loss": 1.7034,
"step": 2900
},
{
"epoch": 1.436222005842259,
"grad_norm": 0.5797551274299622,
"learning_rate": 0.0003132269407627806,
"loss": 1.705,
"step": 2950
},
{
"epoch": 1.4605647517039921,
"grad_norm": 0.6354831457138062,
"learning_rate": 0.000299702461455234,
"loss": 1.7073,
"step": 3000
},
{
"epoch": 1.4605647517039921,
"eval_loss": 1.7770596742630005,
"eval_runtime": 452.7916,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.758,
"step": 3000
},
{
"epoch": 1.4849074975657255,
"grad_norm": 0.6392273306846619,
"learning_rate": 0.0002861779821476873,
"loss": 1.6905,
"step": 3050
},
{
"epoch": 1.5092502434274586,
"grad_norm": 0.6308421492576599,
"learning_rate": 0.00027265350284014065,
"loss": 1.6881,
"step": 3100
},
{
"epoch": 1.533592989289192,
"grad_norm": 0.6558692455291748,
"learning_rate": 0.000259129023532594,
"loss": 1.6928,
"step": 3150
},
{
"epoch": 1.557935735150925,
"grad_norm": 0.5978163480758667,
"learning_rate": 0.0002456045442250473,
"loss": 1.6833,
"step": 3200
},
{
"epoch": 1.5822784810126582,
"grad_norm": 0.6277039051055908,
"learning_rate": 0.0002320800649175007,
"loss": 1.6819,
"step": 3250
},
{
"epoch": 1.6066212268743914,
"grad_norm": 0.6343510746955872,
"learning_rate": 0.00021855558560995403,
"loss": 1.6876,
"step": 3300
},
{
"epoch": 1.6309639727361245,
"grad_norm": 0.5587642192840576,
"learning_rate": 0.00020503110630240738,
"loss": 1.6881,
"step": 3350
},
{
"epoch": 1.6553067185978578,
"grad_norm": 0.6643992066383362,
"learning_rate": 0.0001915066269948607,
"loss": 1.6897,
"step": 3400
},
{
"epoch": 1.6796494644595912,
"grad_norm": 0.6108680367469788,
"learning_rate": 0.00017798214768731404,
"loss": 1.6712,
"step": 3450
},
{
"epoch": 1.7039922103213243,
"grad_norm": 0.5982262492179871,
"learning_rate": 0.0001644576683797674,
"loss": 1.6737,
"step": 3500
},
{
"epoch": 1.7039922103213243,
"eval_loss": 1.7497289180755615,
"eval_runtime": 452.9263,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 3500
},
{
"epoch": 1.7283349561830574,
"grad_norm": 0.5886790156364441,
"learning_rate": 0.00015093318907222073,
"loss": 1.6783,
"step": 3550
},
{
"epoch": 1.7526777020447906,
"grad_norm": 0.6495505571365356,
"learning_rate": 0.00013740870976467406,
"loss": 1.6753,
"step": 3600
},
{
"epoch": 1.7770204479065237,
"grad_norm": 0.5986368656158447,
"learning_rate": 0.00012388423045712742,
"loss": 1.6758,
"step": 3650
},
{
"epoch": 1.801363193768257,
"grad_norm": 0.5811861753463745,
"learning_rate": 0.00011035975114958075,
"loss": 1.66,
"step": 3700
},
{
"epoch": 1.8257059396299904,
"grad_norm": 0.5493278503417969,
"learning_rate": 9.683527184203409e-05,
"loss": 1.6574,
"step": 3750
},
{
"epoch": 1.8500486854917235,
"grad_norm": 0.558631181716919,
"learning_rate": 8.331079253448742e-05,
"loss": 1.68,
"step": 3800
},
{
"epoch": 1.8743914313534566,
"grad_norm": 0.6028789281845093,
"learning_rate": 6.978631322694076e-05,
"loss": 1.661,
"step": 3850
},
{
"epoch": 1.8987341772151898,
"grad_norm": 0.5945218801498413,
"learning_rate": 5.6261833919394105e-05,
"loss": 1.6498,
"step": 3900
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.5483628511428833,
"learning_rate": 4.273735461184745e-05,
"loss": 1.6497,
"step": 3950
},
{
"epoch": 1.9474196689386563,
"grad_norm": 0.5737510323524475,
"learning_rate": 2.9212875304300785e-05,
"loss": 1.6584,
"step": 4000
},
{
"epoch": 1.9474196689386563,
"eval_loss": 1.7295050621032715,
"eval_runtime": 452.9477,
"eval_samples_per_second": 3.022,
"eval_steps_per_second": 0.757,
"step": 4000
}
],
"logging_steps": 50,
"max_steps": 4108,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 7.390810477481165e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}