{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.27787426189649184,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013893713094824591,
"eval_loss": 1.592242956161499,
"eval_runtime": 5.737,
"eval_samples_per_second": 52.815,
"eval_steps_per_second": 26.495,
"step": 1
},
{
"epoch": 0.013893713094824591,
"grad_norm": 2.1929519176483154,
"learning_rate": 0.0002,
"loss": 1.1021,
"step": 10
},
{
"epoch": 0.027787426189649182,
"grad_norm": 1.7123305797576904,
"learning_rate": 0.0002,
"loss": 0.506,
"step": 20
},
{
"epoch": 0.04168113928447378,
"grad_norm": 1.6912617683410645,
"learning_rate": 0.0002,
"loss": 0.3446,
"step": 30
},
{
"epoch": 0.055574852379298365,
"grad_norm": 1.588252305984497,
"learning_rate": 0.0002,
"loss": 0.3451,
"step": 40
},
{
"epoch": 0.06946856547412296,
"grad_norm": 0.7013223171234131,
"learning_rate": 0.0002,
"loss": 0.3116,
"step": 50
},
{
"epoch": 0.06946856547412296,
"eval_loss": 0.30412161350250244,
"eval_runtime": 4.894,
"eval_samples_per_second": 61.912,
"eval_steps_per_second": 31.058,
"step": 50
},
{
"epoch": 0.08336227856894755,
"grad_norm": 1.06772780418396,
"learning_rate": 0.0002,
"loss": 0.2407,
"step": 60
},
{
"epoch": 0.09725599166377215,
"grad_norm": 4.187992572784424,
"learning_rate": 0.0002,
"loss": 0.2153,
"step": 70
},
{
"epoch": 0.11114970475859673,
"grad_norm": 1.100953221321106,
"learning_rate": 0.0002,
"loss": 0.2603,
"step": 80
},
{
"epoch": 0.12504341785342132,
"grad_norm": 1.9511982202529907,
"learning_rate": 0.0002,
"loss": 0.2081,
"step": 90
},
{
"epoch": 0.13893713094824592,
"grad_norm": 0.6646924614906311,
"learning_rate": 0.0002,
"loss": 0.2752,
"step": 100
},
{
"epoch": 0.13893713094824592,
"eval_loss": 0.27076953649520874,
"eval_runtime": 4.9578,
"eval_samples_per_second": 61.116,
"eval_steps_per_second": 30.659,
"step": 100
},
{
"epoch": 0.1528308440430705,
"grad_norm": 0.974806547164917,
"learning_rate": 0.0002,
"loss": 0.292,
"step": 110
},
{
"epoch": 0.1667245571378951,
"grad_norm": 0.9329864382743835,
"learning_rate": 0.0002,
"loss": 0.2803,
"step": 120
},
{
"epoch": 0.1806182702327197,
"grad_norm": 1.1351346969604492,
"learning_rate": 0.0002,
"loss": 0.2498,
"step": 130
},
{
"epoch": 0.1945119833275443,
"grad_norm": 0.8371953368186951,
"learning_rate": 0.0002,
"loss": 0.2312,
"step": 140
},
{
"epoch": 0.2084056964223689,
"grad_norm": 1.1801189184188843,
"learning_rate": 0.0002,
"loss": 0.2219,
"step": 150
},
{
"epoch": 0.2084056964223689,
"eval_loss": 0.256499707698822,
"eval_runtime": 4.9053,
"eval_samples_per_second": 61.769,
"eval_steps_per_second": 30.987,
"step": 150
},
{
"epoch": 0.22229940951719346,
"grad_norm": 0.9388880729675293,
"learning_rate": 0.0002,
"loss": 0.2882,
"step": 160
},
{
"epoch": 0.23619312261201805,
"grad_norm": 0.5570294857025146,
"learning_rate": 0.0002,
"loss": 0.2089,
"step": 170
},
{
"epoch": 0.25008683570684265,
"grad_norm": 0.8899029493331909,
"learning_rate": 0.0002,
"loss": 0.2759,
"step": 180
},
{
"epoch": 0.26398054880166727,
"grad_norm": 1.1891875267028809,
"learning_rate": 0.0002,
"loss": 0.1959,
"step": 190
},
{
"epoch": 0.27787426189649184,
"grad_norm": 1.2228397130966187,
"learning_rate": 0.0002,
"loss": 0.2336,
"step": 200
},
{
"epoch": 0.27787426189649184,
"eval_loss": 0.2487281858921051,
"eval_runtime": 4.97,
"eval_samples_per_second": 60.965,
"eval_steps_per_second": 30.583,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1534476052070400.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
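
The log_history above can be read back with Python's standard json module to follow the eval_loss trend (1.59 at step 1 down to roughly 0.249 at step 200). A minimal sketch, assuming the file is saved under its usual name trainer_state.json inside the checkpoint directory:

import json

# Load the Trainer state written alongside this checkpoint
# (the path is an assumption; point it at the actual checkpoint directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the entries that carry an eval_loss (logged every eval_steps = 50).
evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
for entry in evals:
    print(f"step {entry['step']:>4}  eval_loss {entry['eval_loss']:.4f}")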