{
"best_metric": 1.8369135856628418,
"best_model_checkpoint": "./llama3-itihasa/checkpoint-2000",
"epoch": 0.9737098344693281,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.024342745861733205,
"grad_norm": 0.798042893409729,
"learning_rate": 0.00012165450121654502,
"loss": 2.421,
"step": 50
},
{
"epoch": 0.04868549172346641,
"grad_norm": 0.6517059803009033,
"learning_rate": 0.00024330900243309004,
"loss": 2.0325,
"step": 100
},
{
"epoch": 0.0730282375851996,
"grad_norm": 1.047722339630127,
"learning_rate": 0.00036496350364963507,
"loss": 2.0045,
"step": 150
},
{
"epoch": 0.09737098344693282,
"grad_norm": 0.724214494228363,
"learning_rate": 0.00048661800486618007,
"loss": 1.9665,
"step": 200
},
{
"epoch": 0.12171372930866602,
"grad_norm": 0.7182360887527466,
"learning_rate": 0.0006082725060827251,
"loss": 1.9531,
"step": 250
},
{
"epoch": 0.1460564751703992,
"grad_norm": 0.741189181804657,
"learning_rate": 0.0007299270072992701,
"loss": 1.9566,
"step": 300
},
{
"epoch": 0.17039922103213243,
"grad_norm": 0.7193257212638855,
"learning_rate": 0.0008515815085158151,
"loss": 1.9536,
"step": 350
},
{
"epoch": 0.19474196689386564,
"grad_norm": 0.7618579864501953,
"learning_rate": 0.0009732360097323601,
"loss": 1.9642,
"step": 400
},
{
"epoch": 0.21908471275559882,
"grad_norm": 0.8242681622505188,
"learning_rate": 0.0009894509061401135,
"loss": 1.9899,
"step": 450
},
{
"epoch": 0.24342745861733203,
"grad_norm": 1.0397591590881348,
"learning_rate": 0.0009759264268325669,
"loss": 1.9819,
"step": 500
},
{
"epoch": 0.24342745861733203,
"eval_loss": 1.9741202592849731,
"eval_runtime": 452.9914,
"eval_samples_per_second": 3.022,
"eval_steps_per_second": 0.757,
"step": 500
},
{
"epoch": 0.26777020447906524,
"grad_norm": 0.904543936252594,
"learning_rate": 0.0009624019475250203,
"loss": 1.9513,
"step": 550
},
{
"epoch": 0.2921129503407984,
"grad_norm": 0.8555233478546143,
"learning_rate": 0.0009488774682174737,
"loss": 1.9543,
"step": 600
},
{
"epoch": 0.31645569620253167,
"grad_norm": 0.7415252923965454,
"learning_rate": 0.000935352988909927,
"loss": 1.9621,
"step": 650
},
{
"epoch": 0.34079844206426485,
"grad_norm": 0.7195931077003479,
"learning_rate": 0.0009218285096023803,
"loss": 1.9414,
"step": 700
},
{
"epoch": 0.36514118792599803,
"grad_norm": 0.9943011999130249,
"learning_rate": 0.0009083040302948337,
"loss": 1.9594,
"step": 750
},
{
"epoch": 0.3894839337877313,
"grad_norm": 0.7514945268630981,
"learning_rate": 0.000894779550987287,
"loss": 1.9278,
"step": 800
},
{
"epoch": 0.41382667964946446,
"grad_norm": 0.7884072661399841,
"learning_rate": 0.0008812550716797404,
"loss": 1.9345,
"step": 850
},
{
"epoch": 0.43816942551119764,
"grad_norm": 0.7612814903259277,
"learning_rate": 0.0008677305923721937,
"loss": 1.9081,
"step": 900
},
{
"epoch": 0.4625121713729309,
"grad_norm": 0.7805455327033997,
"learning_rate": 0.000854206113064647,
"loss": 1.9188,
"step": 950
},
{
"epoch": 0.48685491723466406,
"grad_norm": 0.7169475555419922,
"learning_rate": 0.0008406816337571004,
"loss": 1.8903,
"step": 1000
},
{
"epoch": 0.48685491723466406,
"eval_loss": 1.919243335723877,
"eval_runtime": 452.9281,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 1000
},
{
"epoch": 0.5111976630963972,
"grad_norm": 0.7700228095054626,
"learning_rate": 0.0008271571544495538,
"loss": 1.8996,
"step": 1050
},
{
"epoch": 0.5355404089581305,
"grad_norm": 0.8165948390960693,
"learning_rate": 0.000813632675142007,
"loss": 1.9178,
"step": 1100
},
{
"epoch": 0.5598831548198637,
"grad_norm": 0.6771554350852966,
"learning_rate": 0.0008001081958344604,
"loss": 1.9012,
"step": 1150
},
{
"epoch": 0.5842259006815969,
"grad_norm": 0.7270589470863342,
"learning_rate": 0.0007865837165269137,
"loss": 1.8971,
"step": 1200
},
{
"epoch": 0.6085686465433301,
"grad_norm": 0.7590579390525818,
"learning_rate": 0.0007730592372193671,
"loss": 1.8842,
"step": 1250
},
{
"epoch": 0.6329113924050633,
"grad_norm": 0.642905592918396,
"learning_rate": 0.0007595347579118204,
"loss": 1.8661,
"step": 1300
},
{
"epoch": 0.6572541382667965,
"grad_norm": 0.7350940704345703,
"learning_rate": 0.0007460102786042737,
"loss": 1.8792,
"step": 1350
},
{
"epoch": 0.6815968841285297,
"grad_norm": 0.7721772193908691,
"learning_rate": 0.0007324857992967271,
"loss": 1.8627,
"step": 1400
},
{
"epoch": 0.7059396299902629,
"grad_norm": 0.7251712083816528,
"learning_rate": 0.0007189613199891805,
"loss": 1.8715,
"step": 1450
},
{
"epoch": 0.7302823758519961,
"grad_norm": 0.6874309778213501,
"learning_rate": 0.0007054368406816337,
"loss": 1.8491,
"step": 1500
},
{
"epoch": 0.7302823758519961,
"eval_loss": 1.8761951923370361,
"eval_runtime": 452.8961,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 1500
},
{
"epoch": 0.7546251217137293,
"grad_norm": 0.7240548729896545,
"learning_rate": 0.000691912361374087,
"loss": 1.8585,
"step": 1550
},
{
"epoch": 0.7789678675754625,
"grad_norm": 0.6931944489479065,
"learning_rate": 0.0006783878820665405,
"loss": 1.8623,
"step": 1600
},
{
"epoch": 0.8033106134371957,
"grad_norm": 0.710396409034729,
"learning_rate": 0.0006648634027589938,
"loss": 1.8467,
"step": 1650
},
{
"epoch": 0.8276533592989289,
"grad_norm": 0.7441169619560242,
"learning_rate": 0.0006513389234514471,
"loss": 1.857,
"step": 1700
},
{
"epoch": 0.8519961051606622,
"grad_norm": 0.752155065536499,
"learning_rate": 0.0006378144441439005,
"loss": 1.8404,
"step": 1750
},
{
"epoch": 0.8763388510223953,
"grad_norm": 0.6920920014381409,
"learning_rate": 0.0006242899648363538,
"loss": 1.8331,
"step": 1800
},
{
"epoch": 0.9006815968841285,
"grad_norm": 0.6669381260871887,
"learning_rate": 0.0006107654855288071,
"loss": 1.8466,
"step": 1850
},
{
"epoch": 0.9250243427458618,
"grad_norm": 0.6877081394195557,
"learning_rate": 0.0005972410062212606,
"loss": 1.829,
"step": 1900
},
{
"epoch": 0.9493670886075949,
"grad_norm": 0.7491869926452637,
"learning_rate": 0.0005837165269137138,
"loss": 1.8331,
"step": 1950
},
{
"epoch": 0.9737098344693281,
"grad_norm": 0.7513737082481384,
"learning_rate": 0.0005701920476061671,
"loss": 1.8106,
"step": 2000
},
{
"epoch": 0.9737098344693281,
"eval_loss": 1.8369135856628418,
"eval_runtime": 452.8951,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 2000
}
],
"logging_steps": 50,
"max_steps": 4108,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 3.6955207237632e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}