{
"best_metric": 11.5,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.008467579754016808,
"eval_steps": 100,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 4.2337898770084044e-05,
"eval_loss": 11.5,
"eval_runtime": 100.6746,
"eval_samples_per_second": 98.784,
"eval_steps_per_second": 49.397,
"step": 1
},
{
"epoch": 0.0004233789877008404,
"grad_norm": 3.8532682083314285e-05,
"learning_rate": 1e-05,
"loss": 46.0,
"step": 10
},
{
"epoch": 0.0008467579754016808,
"grad_norm": 1.7726280930219218e-05,
"learning_rate": 2e-05,
"loss": 46.0,
"step": 20
},
{
"epoch": 0.0012701369631025212,
"grad_norm": 2.8598151402547956e-05,
"learning_rate": 3e-05,
"loss": 46.0,
"step": 30
},
{
"epoch": 0.0016935159508033616,
"grad_norm": 3.5454264434520155e-05,
"learning_rate": 4e-05,
"loss": 46.0,
"step": 40
},
{
"epoch": 0.002116894938504202,
"grad_norm": 3.511717659421265e-05,
"learning_rate": 5e-05,
"loss": 46.0,
"step": 50
},
{
"epoch": 0.0025402739262050424,
"grad_norm": 6.926015339558944e-05,
"learning_rate": 6e-05,
"loss": 46.0,
"step": 60
},
{
"epoch": 0.002963652913905883,
"grad_norm": 0.0002537610998842865,
"learning_rate": 7e-05,
"loss": 46.0,
"step": 70
},
{
"epoch": 0.0033870319016067233,
"grad_norm": 4.098670251551084e-05,
"learning_rate": 8e-05,
"loss": 46.0,
"step": 80
},
{
"epoch": 0.0038104108893075635,
"grad_norm": 6.75772680551745e-05,
"learning_rate": 9e-05,
"loss": 46.0,
"step": 90
},
{
"epoch": 0.004233789877008404,
"grad_norm": 0.00013273257354740053,
"learning_rate": 0.0001,
"loss": 46.0,
"step": 100
},
{
"epoch": 0.004233789877008404,
"eval_loss": 11.5,
"eval_runtime": 100.7104,
"eval_samples_per_second": 98.748,
"eval_steps_per_second": 49.379,
"step": 100
},
{
"epoch": 0.004657168864709245,
"grad_norm": 0.0001554292975924909,
"learning_rate": 9.755282581475769e-05,
"loss": 46.0,
"step": 110
},
{
"epoch": 0.005080547852410085,
"grad_norm": 0.00017063747509382665,
"learning_rate": 9.045084971874738e-05,
"loss": 46.0,
"step": 120
},
{
"epoch": 0.005503926840110925,
"grad_norm": 0.0002251509140478447,
"learning_rate": 7.938926261462366e-05,
"loss": 46.0,
"step": 130
},
{
"epoch": 0.005927305827811766,
"grad_norm": 0.00025318050757050514,
"learning_rate": 6.545084971874738e-05,
"loss": 46.0,
"step": 140
},
{
"epoch": 0.006350684815512606,
"grad_norm": 0.00024476656108163297,
"learning_rate": 5e-05,
"loss": 46.0,
"step": 150
},
{
"epoch": 0.006774063803213447,
"grad_norm": 0.0004327484348323196,
"learning_rate": 3.4549150281252636e-05,
"loss": 46.0,
"step": 160
},
{
"epoch": 0.007197442790914287,
"grad_norm": 0.00031539108022116125,
"learning_rate": 2.061073738537635e-05,
"loss": 46.0,
"step": 170
},
{
"epoch": 0.007620821778615127,
"grad_norm": 0.00022764148889109492,
"learning_rate": 9.549150281252633e-06,
"loss": 46.0,
"step": 180
},
{
"epoch": 0.008044200766315967,
"grad_norm": 0.00025987360277213156,
"learning_rate": 2.4471741852423237e-06,
"loss": 46.0,
"step": 190
},
{
"epoch": 0.008467579754016808,
"grad_norm": 0.00024760179803706706,
"learning_rate": 0.0,
"loss": 46.0,
"step": 200
},
{
"epoch": 0.008467579754016808,
"eval_loss": 11.5,
"eval_runtime": 101.2299,
"eval_samples_per_second": 98.242,
"eval_steps_per_second": 49.126,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 16362094854144.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}