{ "best_global_step": 150, "best_metric": 8.623494956068987, "best_model_checkpoint": "./whisper-finetune-small/checkpoint-150", "epoch": 16.685714285714287, "eval_steps": 50, "global_step": 150, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.1142857142857143, "grad_norm": 13.765691757202148, "learning_rate": 2.666666666666667e-06, "loss": 1.0811, "step": 10 }, { "epoch": 2.2285714285714286, "grad_norm": 8.985676765441895, "learning_rate": 6e-06, "loss": 0.5649, "step": 20 }, { "epoch": 3.342857142857143, "grad_norm": 4.251060485839844, "learning_rate": 9.333333333333334e-06, "loss": 0.255, "step": 30 }, { "epoch": 4.457142857142857, "grad_norm": 5.627696990966797, "learning_rate": 9.703703703703703e-06, "loss": 0.1037, "step": 40 }, { "epoch": 5.571428571428571, "grad_norm": 3.0673668384552, "learning_rate": 9.333333333333334e-06, "loss": 0.0595, "step": 50 }, { "epoch": 5.571428571428571, "eval_cer": 9.404490725675235, "eval_loss": 0.2722940146923065, "eval_model_preparation_time": 0.0054, "eval_runtime": 44.9214, "eval_samples_per_second": 1.358, "eval_steps_per_second": 0.356, "step": 50 }, { "epoch": 6.685714285714286, "grad_norm": 0.43924692273139954, "learning_rate": 8.962962962962963e-06, "loss": 0.0276, "step": 60 }, { "epoch": 7.8, "grad_norm": 0.8204056024551392, "learning_rate": 8.592592592592593e-06, "loss": 0.0137, "step": 70 }, { "epoch": 8.914285714285715, "grad_norm": 0.2582692801952362, "learning_rate": 8.222222222222222e-06, "loss": 0.0099, "step": 80 }, { "epoch": 10.0, "grad_norm": 1.72308349609375, "learning_rate": 7.851851851851853e-06, "loss": 0.0068, "step": 90 }, { "epoch": 11.114285714285714, "grad_norm": 0.21061824262142181, "learning_rate": 7.481481481481482e-06, "loss": 0.005, "step": 100 }, { "epoch": 11.114285714285714, "eval_cer": 8.688577936869509, "eval_loss": 0.27061235904693604, "eval_model_preparation_time": 0.0054, "eval_runtime": 41.153, "eval_samples_per_second": 1.482, "eval_steps_per_second": 0.389, "step": 100 }, { "epoch": 12.228571428571428, "grad_norm": 0.053500544279813766, "learning_rate": 7.111111111111112e-06, "loss": 0.0024, "step": 110 }, { "epoch": 13.342857142857143, "grad_norm": 0.01863274723291397, "learning_rate": 6.740740740740741e-06, "loss": 0.0021, "step": 120 }, { "epoch": 14.457142857142857, "grad_norm": 0.12402216345071793, "learning_rate": 6.370370370370371e-06, "loss": 0.0016, "step": 130 }, { "epoch": 15.571428571428571, "grad_norm": 0.08812814950942993, "learning_rate": 6e-06, "loss": 0.0011, "step": 140 }, { "epoch": 16.685714285714287, "grad_norm": 0.01754395291209221, "learning_rate": 5.62962962962963e-06, "loss": 0.0009, "step": 150 }, { "epoch": 16.685714285714287, "eval_cer": 8.623494956068987, "eval_loss": 0.28332972526550293, "eval_model_preparation_time": 0.0054, "eval_runtime": 40.2364, "eval_samples_per_second": 1.516, "eval_steps_per_second": 0.398, "step": 150 } ], "logging_steps": 10, "max_steps": 300, "num_input_tokens_seen": 0, "num_train_epochs": 38, "save_steps": 50, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 6.7413549514752e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }