dmWM-mistralai-Ministral-8B-Instruct-2410-LucieFr-Al4-OWT-d4-a0.1-v2/checkpoint-500/trainer_state.json
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004,
      "grad_norm": 192.0,
      "learning_rate": 7.2e-07,
      "loss": 17.8929,
      "step": 10
    },
    {
      "epoch": 0.008,
      "grad_norm": 116.5,
      "learning_rate": 1.52e-06,
      "loss": 17.1894,
      "step": 20
    },
    {
      "epoch": 0.012,
      "grad_norm": 93.5,
      "learning_rate": 2.3200000000000002e-06,
      "loss": 17.1662,
      "step": 30
    },
    {
      "epoch": 0.016,
      "grad_norm": 59.0,
      "learning_rate": 3.12e-06,
      "loss": 15.5562,
      "step": 40
    },
    {
      "epoch": 0.02,
      "grad_norm": 93.5,
      "learning_rate": 3.920000000000001e-06,
      "loss": 14.6218,
      "step": 50
    },
    {
      "epoch": 0.024,
      "grad_norm": 122.5,
      "learning_rate": 4.7200000000000005e-06,
      "loss": 14.0262,
      "step": 60
    },
    {
      "epoch": 0.028,
      "grad_norm": 135.0,
      "learning_rate": 5.5200000000000005e-06,
      "loss": 14.516,
      "step": 70
    },
    {
      "epoch": 0.032,
      "grad_norm": 211.0,
      "learning_rate": 6.3200000000000005e-06,
      "loss": 13.598,
      "step": 80
    },
    {
      "epoch": 0.036,
      "grad_norm": 172.0,
      "learning_rate": 7.1200000000000004e-06,
      "loss": 14.4849,
      "step": 90
    },
    {
      "epoch": 0.04,
      "grad_norm": 183.0,
      "learning_rate": 7.92e-06,
      "loss": 13.3844,
      "step": 100
    },
    {
      "epoch": 0.044,
      "grad_norm": 144.0,
      "learning_rate": 8.720000000000001e-06,
      "loss": 13.5488,
      "step": 110
    },
    {
      "epoch": 0.048,
      "grad_norm": 153.0,
      "learning_rate": 9.52e-06,
      "loss": 14.8995,
      "step": 120
    },
    {
      "epoch": 0.052,
      "grad_norm": 156.0,
      "learning_rate": 1.0320000000000001e-05,
      "loss": 14.0248,
      "step": 130
    },
    {
      "epoch": 0.056,
      "grad_norm": 276.0,
      "learning_rate": 1.1120000000000002e-05,
      "loss": 14.2637,
      "step": 140
    },
    {
      "epoch": 0.06,
      "grad_norm": 176.0,
      "learning_rate": 1.1920000000000001e-05,
      "loss": 15.7517,
      "step": 150
    },
    {
      "epoch": 0.064,
      "grad_norm": 178.0,
      "learning_rate": 1.2720000000000002e-05,
      "loss": 15.2519,
      "step": 160
    },
    {
      "epoch": 0.068,
      "grad_norm": 5408.0,
      "learning_rate": 1.3520000000000003e-05,
      "loss": 51.245,
      "step": 170
    },
    {
      "epoch": 0.072,
      "grad_norm": 576.0,
      "learning_rate": 1.432e-05,
      "loss": 23.1949,
      "step": 180
    },
    {
      "epoch": 0.076,
      "grad_norm": 402.0,
      "learning_rate": 1.5120000000000001e-05,
      "loss": 18.3584,
      "step": 190
    },
    {
      "epoch": 0.08,
      "grad_norm": 178.0,
      "learning_rate": 1.5920000000000003e-05,
      "loss": 17.5383,
      "step": 200
    },
    {
      "epoch": 0.084,
      "grad_norm": 2064.0,
      "learning_rate": 1.672e-05,
      "loss": 22.9987,
      "step": 210
    },
    {
      "epoch": 0.088,
      "grad_norm": 190.0,
      "learning_rate": 1.752e-05,
      "loss": 19.4033,
      "step": 220
    },
    {
      "epoch": 0.092,
      "grad_norm": 150.0,
      "learning_rate": 1.832e-05,
      "loss": 15.7816,
      "step": 230
    },
    {
      "epoch": 0.096,
      "grad_norm": 182.0,
      "learning_rate": 1.912e-05,
      "loss": 20.0571,
      "step": 240
    },
    {
      "epoch": 0.1,
      "grad_norm": 302.0,
      "learning_rate": 1.9920000000000002e-05,
      "loss": 19.3505,
      "step": 250
    },
    {
      "epoch": 0.104,
      "grad_norm": 246.0,
      "learning_rate": 1.9999210442038164e-05,
      "loss": 19.3353,
      "step": 260
    },
    {
      "epoch": 0.108,
      "grad_norm": 139.0,
      "learning_rate": 1.9996481265944146e-05,
      "loss": 17.5568,
      "step": 270
    },
    {
      "epoch": 0.112,
      "grad_norm": 314.0,
      "learning_rate": 1.9991803256020393e-05,
      "loss": 24.114,
      "step": 280
    },
    {
      "epoch": 0.116,
      "grad_norm": 168.0,
      "learning_rate": 1.99851773242542e-05,
      "loss": 24.3222,
      "step": 290
    },
    {
      "epoch": 0.12,
      "grad_norm": 157.0,
      "learning_rate": 1.99766047623841e-05,
      "loss": 17.1357,
      "step": 300
    },
    {
      "epoch": 0.124,
      "grad_norm": 243.0,
      "learning_rate": 1.996608724164801e-05,
      "loss": 17.5944,
      "step": 310
    },
    {
      "epoch": 0.128,
      "grad_norm": 168.0,
      "learning_rate": 1.995362681245744e-05,
      "loss": 17.9973,
      "step": 320
    },
    {
      "epoch": 0.132,
      "grad_norm": 129.0,
      "learning_rate": 1.9939225903997748e-05,
      "loss": 16.3556,
      "step": 330
    },
    {
      "epoch": 0.136,
      "grad_norm": 109.0,
      "learning_rate": 1.992288732375458e-05,
      "loss": 15.275,
      "step": 340
    },
    {
      "epoch": 0.14,
      "grad_norm": 148.0,
      "learning_rate": 1.9904614256966514e-05,
      "loss": 16.484,
      "step": 350
    },
    {
      "epoch": 0.144,
      "grad_norm": 139.0,
      "learning_rate": 1.9884410266004134e-05,
      "loss": 17.0713,
      "step": 360
    },
    {
      "epoch": 0.148,
      "grad_norm": 252.0,
      "learning_rate": 1.986227928967551e-05,
      "loss": 17.0777,
      "step": 370
    },
    {
      "epoch": 0.152,
      "grad_norm": 160.0,
      "learning_rate": 1.983822564245833e-05,
      "loss": 16.9359,
      "step": 380
    },
    {
      "epoch": 0.156,
      "grad_norm": 128.0,
      "learning_rate": 1.981225401365877e-05,
      "loss": 33.823,
      "step": 390
    },
    {
      "epoch": 0.16,
      "grad_norm": 142.0,
      "learning_rate": 1.9784369466497333e-05,
      "loss": 17.8366,
      "step": 400
    },
    {
      "epoch": 0.164,
      "grad_norm": 604.0,
      "learning_rate": 1.9754577437121733e-05,
      "loss": 17.0926,
      "step": 410
    },
    {
      "epoch": 0.168,
      "grad_norm": 138.0,
      "learning_rate": 1.9722883733547128e-05,
      "loss": 25.0166,
      "step": 420
    },
    {
      "epoch": 0.172,
      "grad_norm": 140.0,
      "learning_rate": 1.968929453452383e-05,
      "loss": 16.1607,
      "step": 430
    },
    {
      "epoch": 0.176,
      "grad_norm": 100.0,
      "learning_rate": 1.965381638833274e-05,
      "loss": 15.8693,
      "step": 440
    },
    {
      "epoch": 0.18,
      "grad_norm": 127.5,
      "learning_rate": 1.9616456211508756e-05,
      "loss": 16.6326,
      "step": 450
    },
    {
      "epoch": 0.184,
      "grad_norm": 136.0,
      "learning_rate": 1.9577221287492368e-05,
      "loss": 16.4813,
      "step": 460
    },
    {
      "epoch": 0.188,
      "grad_norm": 175.0,
      "learning_rate": 1.9536119265209763e-05,
      "loss": 16.9464,
      "step": 470
    },
    {
      "epoch": 0.192,
      "grad_norm": 128.0,
      "learning_rate": 1.9493158157581617e-05,
      "loss": 16.8985,
      "step": 480
    },
    {
      "epoch": 0.196,
      "grad_norm": 141.0,
      "learning_rate": 1.9448346339960984e-05,
      "loss": 16.1715,
      "step": 490
    },
    {
      "epoch": 0.2,
      "grad_norm": 112.0,
      "learning_rate": 1.9401692548500504e-05,
      "loss": 15.8461,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.678013497540608e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}