{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.09861932938856016,
"eval_steps": 3,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0039447731755424065,
"grad_norm": 12.800737380981445,
"learning_rate": 2e-05,
"loss": 4.4657,
"step": 1
},
{
"epoch": 0.0039447731755424065,
"eval_loss": 4.758890151977539,
"eval_runtime": 22.2538,
"eval_samples_per_second": 9.616,
"eval_steps_per_second": 4.808,
"step": 1
},
{
"epoch": 0.007889546351084813,
"grad_norm": 12.97591781616211,
"learning_rate": 4e-05,
"loss": 4.9917,
"step": 2
},
{
"epoch": 0.011834319526627219,
"grad_norm": 12.59117603302002,
"learning_rate": 6e-05,
"loss": 4.6719,
"step": 3
},
{
"epoch": 0.011834319526627219,
"eval_loss": 4.388483047485352,
"eval_runtime": 22.2476,
"eval_samples_per_second": 9.619,
"eval_steps_per_second": 4.81,
"step": 3
},
{
"epoch": 0.015779092702169626,
"grad_norm": 13.937124252319336,
"learning_rate": 8e-05,
"loss": 4.4163,
"step": 4
},
{
"epoch": 0.01972386587771203,
"grad_norm": 13.813149452209473,
"learning_rate": 0.0001,
"loss": 3.3764,
"step": 5
},
{
"epoch": 0.023668639053254437,
"grad_norm": 8.125162124633789,
"learning_rate": 0.00012,
"loss": 2.1404,
"step": 6
},
{
"epoch": 0.023668639053254437,
"eval_loss": 1.3306505680084229,
"eval_runtime": 22.355,
"eval_samples_per_second": 9.573,
"eval_steps_per_second": 4.786,
"step": 6
},
{
"epoch": 0.027613412228796843,
"grad_norm": 8.103511810302734,
"learning_rate": 0.00014,
"loss": 1.3031,
"step": 7
},
{
"epoch": 0.03155818540433925,
"grad_norm": 6.617693901062012,
"learning_rate": 0.00016,
"loss": 0.8372,
"step": 8
},
{
"epoch": 0.03550295857988166,
"grad_norm": 3.7571351528167725,
"learning_rate": 0.00018,
"loss": 0.396,
"step": 9
},
{
"epoch": 0.03550295857988166,
"eval_loss": 0.2699667811393738,
"eval_runtime": 22.2641,
"eval_samples_per_second": 9.612,
"eval_steps_per_second": 4.806,
"step": 9
},
{
"epoch": 0.03944773175542406,
"grad_norm": 2.737286329269409,
"learning_rate": 0.0002,
"loss": 0.3586,
"step": 10
},
{
"epoch": 0.04339250493096647,
"grad_norm": 2.9477121829986572,
"learning_rate": 0.00019781476007338058,
"loss": 0.357,
"step": 11
},
{
"epoch": 0.047337278106508875,
"grad_norm": 3.2014811038970947,
"learning_rate": 0.0001913545457642601,
"loss": 0.3266,
"step": 12
},
{
"epoch": 0.047337278106508875,
"eval_loss": 0.2208854705095291,
"eval_runtime": 22.3445,
"eval_samples_per_second": 9.577,
"eval_steps_per_second": 4.789,
"step": 12
},
{
"epoch": 0.05128205128205128,
"grad_norm": 1.2773289680480957,
"learning_rate": 0.00018090169943749476,
"loss": 0.1722,
"step": 13
},
{
"epoch": 0.055226824457593686,
"grad_norm": 2.2797818183898926,
"learning_rate": 0.00016691306063588583,
"loss": 0.1792,
"step": 14
},
{
"epoch": 0.05917159763313609,
"grad_norm": 1.130849838256836,
"learning_rate": 0.00015000000000000001,
"loss": 0.1988,
"step": 15
},
{
"epoch": 0.05917159763313609,
"eval_loss": 0.20907557010650635,
"eval_runtime": 22.2441,
"eval_samples_per_second": 9.621,
"eval_steps_per_second": 4.81,
"step": 15
},
{
"epoch": 0.0631163708086785,
"grad_norm": 1.6371982097625732,
"learning_rate": 0.00013090169943749476,
"loss": 0.336,
"step": 16
},
{
"epoch": 0.0670611439842209,
"grad_norm": 1.4365739822387695,
"learning_rate": 0.00011045284632676536,
"loss": 0.2788,
"step": 17
},
{
"epoch": 0.07100591715976332,
"grad_norm": 0.5848339796066284,
"learning_rate": 8.954715367323468e-05,
"loss": 0.0984,
"step": 18
},
{
"epoch": 0.07100591715976332,
"eval_loss": 0.21445059776306152,
"eval_runtime": 22.3011,
"eval_samples_per_second": 9.596,
"eval_steps_per_second": 4.798,
"step": 18
},
{
"epoch": 0.07495069033530571,
"grad_norm": 2.026401996612549,
"learning_rate": 6.909830056250527e-05,
"loss": 0.2734,
"step": 19
},
{
"epoch": 0.07889546351084813,
"grad_norm": 1.0910876989364624,
"learning_rate": 5.000000000000002e-05,
"loss": 0.215,
"step": 20
},
{
"epoch": 0.08284023668639054,
"grad_norm": 1.227197527885437,
"learning_rate": 3.308693936411421e-05,
"loss": 0.2262,
"step": 21
},
{
"epoch": 0.08284023668639054,
"eval_loss": 0.1881045401096344,
"eval_runtime": 22.3414,
"eval_samples_per_second": 9.579,
"eval_steps_per_second": 4.789,
"step": 21
},
{
"epoch": 0.08678500986193294,
"grad_norm": 0.9256035089492798,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.1988,
"step": 22
},
{
"epoch": 0.09072978303747535,
"grad_norm": 0.8593224883079529,
"learning_rate": 8.645454235739903e-06,
"loss": 0.2024,
"step": 23
},
{
"epoch": 0.09467455621301775,
"grad_norm": 1.7054708003997803,
"learning_rate": 2.1852399266194314e-06,
"loss": 0.277,
"step": 24
},
{
"epoch": 0.09467455621301775,
"eval_loss": 0.18979641795158386,
"eval_runtime": 22.2705,
"eval_samples_per_second": 9.609,
"eval_steps_per_second": 4.805,
"step": 24
},
{
"epoch": 0.09861932938856016,
"grad_norm": 0.966243326663971,
"learning_rate": 0.0,
"loss": 0.1495,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 25,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.26729302278144e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
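
A minimal sketch of how the checkpoint state above could be inspected, assuming it has been saved locally as "trainer_state.json" (the path is illustrative, not part of the checkpoint). It splits log_history into training records (keyed by "loss") and evaluation records (keyed by "eval_loss") and prints a short summary.

import json

# Load the Trainer state JSON shown above; the filename is an assumption.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training entries (with "loss") and evaluation
# entries (with "eval_loss"); separate them by key.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.4f}")
print(f"final train loss: {train_logs[-1]['loss']}")
print(f"best eval loss:   {min(e['eval_loss'] for e in eval_logs)}")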