{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.24210070784194454,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009684028313677782,
      "grad_norm": 1.3304239511489868,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.6269,
      "step": 100
    },
    {
      "epoch": 0.019368056627355565,
      "grad_norm": 1.3456004858016968,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.3907,
      "step": 200
    },
    {
      "epoch": 0.029052084941033347,
      "grad_norm": 1.288303256034851,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 0.3566,
      "step": 300
    },
    {
      "epoch": 0.03873611325471113,
      "grad_norm": 1.231126308441162,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 0.3428,
      "step": 400
    },
    {
      "epoch": 0.04842014156838891,
      "grad_norm": 1.1136806011199951,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.3361,
      "step": 500
    },
    {
      "epoch": 0.058104169882066695,
      "grad_norm": 1.1187803745269775,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 0.3305,
      "step": 600
    },
    {
      "epoch": 0.06778819819574447,
      "grad_norm": 1.0419028997421265,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.329,
      "step": 700
    },
    {
      "epoch": 0.07747222650942226,
      "grad_norm": 1.0367131233215332,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 0.3222,
      "step": 800
    },
    {
      "epoch": 0.08715625482310004,
      "grad_norm": 1.1226321458816528,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 0.3219,
      "step": 900
    },
    {
      "epoch": 0.09684028313677782,
      "grad_norm": 0.9624041318893433,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.3173,
      "step": 1000
    },
    {
      "epoch": 0.1065243114504556,
      "grad_norm": 1.0128655433654785,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 0.3103,
      "step": 1100
    },
    {
      "epoch": 0.11620833976413339,
      "grad_norm": 0.9672228693962097,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 0.3138,
      "step": 1200
    },
    {
      "epoch": 0.12589236807781118,
      "grad_norm": 0.9582985639572144,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.3095,
      "step": 1300
    },
    {
      "epoch": 0.13557639639148894,
      "grad_norm": 1.0016893148422241,
      "learning_rate": 9.651005032974994e-06,
      "loss": 0.3019,
      "step": 1400
    },
    {
      "epoch": 0.14526042470516673,
      "grad_norm": 0.9507680535316467,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.2964,
      "step": 1500
    },
    {
      "epoch": 0.15494445301884452,
      "grad_norm": 0.9306615591049194,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.2942,
      "step": 1600
    },
    {
      "epoch": 0.1646284813325223,
      "grad_norm": 0.9242100715637207,
      "learning_rate": 5.616288532109225e-06,
      "loss": 0.2936,
      "step": 1700
    },
    {
      "epoch": 0.17431250964620007,
      "grad_norm": 1.1241374015808105,
      "learning_rate": 4.408070965292534e-06,
      "loss": 0.2919,
      "step": 1800
    },
    {
      "epoch": 0.18399653795987786,
      "grad_norm": 1.051304817199707,
      "learning_rate": 3.308693936411421e-06,
      "loss": 0.2889,
      "step": 1900
    },
    {
      "epoch": 0.19368056627355565,
      "grad_norm": 0.9818484783172607,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.2854,
      "step": 2000
    },
    {
      "epoch": 0.20336459458723344,
      "grad_norm": 0.9617915749549866,
      "learning_rate": 1.5195190384357405e-06,
      "loss": 0.2792,
      "step": 2100
    },
    {
      "epoch": 0.2130486229009112,
      "grad_norm": 1.021549940109253,
      "learning_rate": 8.645454235739903e-07,
      "loss": 0.2843,
      "step": 2200
    },
    {
      "epoch": 0.222732651214589,
      "grad_norm": 0.9170396327972412,
      "learning_rate": 3.8738304061681107e-07,
      "loss": 0.2855,
      "step": 2300
    },
    {
      "epoch": 0.23241667952826678,
      "grad_norm": 0.9328998923301697,
      "learning_rate": 9.731931258429638e-08,
      "loss": 0.2844,
      "step": 2400
    },
    {
      "epoch": 0.24210070784194454,
      "grad_norm": 0.9335331320762634,
      "learning_rate": 0.0,
      "loss": 0.2819,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 2500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.3916038848512e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}