{
"best_metric": 1.919243335723877,
"best_model_checkpoint": "./llama3-itihasa/checkpoint-1000",
"epoch": 0.48685491723466406,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.024342745861733205,
"grad_norm": 0.798042893409729,
"learning_rate": 0.00012165450121654502,
"loss": 2.421,
"step": 50
},
{
"epoch": 0.04868549172346641,
"grad_norm": 0.6517059803009033,
"learning_rate": 0.00024330900243309004,
"loss": 2.0325,
"step": 100
},
{
"epoch": 0.0730282375851996,
"grad_norm": 1.047722339630127,
"learning_rate": 0.00036496350364963507,
"loss": 2.0045,
"step": 150
},
{
"epoch": 0.09737098344693282,
"grad_norm": 0.724214494228363,
"learning_rate": 0.00048661800486618007,
"loss": 1.9665,
"step": 200
},
{
"epoch": 0.12171372930866602,
"grad_norm": 0.7182360887527466,
"learning_rate": 0.0006082725060827251,
"loss": 1.9531,
"step": 250
},
{
"epoch": 0.1460564751703992,
"grad_norm": 0.741189181804657,
"learning_rate": 0.0007299270072992701,
"loss": 1.9566,
"step": 300
},
{
"epoch": 0.17039922103213243,
"grad_norm": 0.7193257212638855,
"learning_rate": 0.0008515815085158151,
"loss": 1.9536,
"step": 350
},
{
"epoch": 0.19474196689386564,
"grad_norm": 0.7618579864501953,
"learning_rate": 0.0009732360097323601,
"loss": 1.9642,
"step": 400
},
{
"epoch": 0.21908471275559882,
"grad_norm": 0.8242681622505188,
"learning_rate": 0.0009894509061401135,
"loss": 1.9899,
"step": 450
},
{
"epoch": 0.24342745861733203,
"grad_norm": 1.0397591590881348,
"learning_rate": 0.0009759264268325669,
"loss": 1.9819,
"step": 500
},
{
"epoch": 0.24342745861733203,
"eval_loss": 1.9741202592849731,
"eval_runtime": 452.9914,
"eval_samples_per_second": 3.022,
"eval_steps_per_second": 0.757,
"step": 500
},
{
"epoch": 0.26777020447906524,
"grad_norm": 0.904543936252594,
"learning_rate": 0.0009624019475250203,
"loss": 1.9513,
"step": 550
},
{
"epoch": 0.2921129503407984,
"grad_norm": 0.8555233478546143,
"learning_rate": 0.0009488774682174737,
"loss": 1.9543,
"step": 600
},
{
"epoch": 0.31645569620253167,
"grad_norm": 0.7415252923965454,
"learning_rate": 0.000935352988909927,
"loss": 1.9621,
"step": 650
},
{
"epoch": 0.34079844206426485,
"grad_norm": 0.7195931077003479,
"learning_rate": 0.0009218285096023803,
"loss": 1.9414,
"step": 700
},
{
"epoch": 0.36514118792599803,
"grad_norm": 0.9943011999130249,
"learning_rate": 0.0009083040302948337,
"loss": 1.9594,
"step": 750
},
{
"epoch": 0.3894839337877313,
"grad_norm": 0.7514945268630981,
"learning_rate": 0.000894779550987287,
"loss": 1.9278,
"step": 800
},
{
"epoch": 0.41382667964946446,
"grad_norm": 0.7884072661399841,
"learning_rate": 0.0008812550716797404,
"loss": 1.9345,
"step": 850
},
{
"epoch": 0.43816942551119764,
"grad_norm": 0.7612814903259277,
"learning_rate": 0.0008677305923721937,
"loss": 1.9081,
"step": 900
},
{
"epoch": 0.4625121713729309,
"grad_norm": 0.7805455327033997,
"learning_rate": 0.000854206113064647,
"loss": 1.9188,
"step": 950
},
{
"epoch": 0.48685491723466406,
"grad_norm": 0.7169475555419922,
"learning_rate": 0.0008406816337571004,
"loss": 1.8903,
"step": 1000
},
{
"epoch": 0.48685491723466406,
"eval_loss": 1.919243335723877,
"eval_runtime": 452.9281,
"eval_samples_per_second": 3.023,
"eval_steps_per_second": 0.757,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 4108,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 1.8477603618816e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}