{ "best_global_step": 300, "best_metric": 8.330621542466645, "best_model_checkpoint": "./whisper-finetune-small/checkpoint-300", "epoch": 33.34285714285714, "eval_steps": 50, "global_step": 300, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.1142857142857143, "grad_norm": 13.765691757202148, "learning_rate": 2.666666666666667e-06, "loss": 1.0811, "step": 10 }, { "epoch": 2.2285714285714286, "grad_norm": 8.985676765441895, "learning_rate": 6e-06, "loss": 0.5649, "step": 20 }, { "epoch": 3.342857142857143, "grad_norm": 4.251060485839844, "learning_rate": 9.333333333333334e-06, "loss": 0.255, "step": 30 }, { "epoch": 4.457142857142857, "grad_norm": 5.627696990966797, "learning_rate": 9.703703703703703e-06, "loss": 0.1037, "step": 40 }, { "epoch": 5.571428571428571, "grad_norm": 3.0673668384552, "learning_rate": 9.333333333333334e-06, "loss": 0.0595, "step": 50 }, { "epoch": 5.571428571428571, "eval_cer": 9.404490725675235, "eval_loss": 0.2722940146923065, "eval_model_preparation_time": 0.0054, "eval_runtime": 44.9214, "eval_samples_per_second": 1.358, "eval_steps_per_second": 0.356, "step": 50 }, { "epoch": 6.685714285714286, "grad_norm": 0.43924692273139954, "learning_rate": 8.962962962962963e-06, "loss": 0.0276, "step": 60 }, { "epoch": 7.8, "grad_norm": 0.8204056024551392, "learning_rate": 8.592592592592593e-06, "loss": 0.0137, "step": 70 }, { "epoch": 8.914285714285715, "grad_norm": 0.2582692801952362, "learning_rate": 8.222222222222222e-06, "loss": 0.0099, "step": 80 }, { "epoch": 10.0, "grad_norm": 1.72308349609375, "learning_rate": 7.851851851851853e-06, "loss": 0.0068, "step": 90 }, { "epoch": 11.114285714285714, "grad_norm": 0.21061824262142181, "learning_rate": 7.481481481481482e-06, "loss": 0.005, "step": 100 }, { "epoch": 11.114285714285714, "eval_cer": 8.688577936869509, "eval_loss": 0.27061235904693604, "eval_model_preparation_time": 0.0054, "eval_runtime": 41.153, "eval_samples_per_second": 1.482, "eval_steps_per_second": 0.389, "step": 100 }, { "epoch": 12.228571428571428, "grad_norm": 0.053500544279813766, "learning_rate": 7.111111111111112e-06, "loss": 0.0024, "step": 110 }, { "epoch": 13.342857142857143, "grad_norm": 0.01863274723291397, "learning_rate": 6.740740740740741e-06, "loss": 0.0021, "step": 120 }, { "epoch": 14.457142857142857, "grad_norm": 0.12402216345071793, "learning_rate": 6.370370370370371e-06, "loss": 0.0016, "step": 130 }, { "epoch": 15.571428571428571, "grad_norm": 0.08812814950942993, "learning_rate": 6e-06, "loss": 0.0011, "step": 140 }, { "epoch": 16.685714285714287, "grad_norm": 0.01754395291209221, "learning_rate": 5.62962962962963e-06, "loss": 0.0009, "step": 150 }, { "epoch": 16.685714285714287, "eval_cer": 8.623494956068987, "eval_loss": 0.28332972526550293, "eval_model_preparation_time": 0.0054, "eval_runtime": 40.2364, "eval_samples_per_second": 1.516, "eval_steps_per_second": 0.398, "step": 150 }, { "epoch": 17.8, "grad_norm": 0.027628909796476364, "learning_rate": 5.259259259259259e-06, "loss": 0.001, "step": 160 }, { "epoch": 18.914285714285715, "grad_norm": 0.01541728712618351, "learning_rate": 4.888888888888889e-06, "loss": 0.0008, "step": 170 }, { "epoch": 20.0, "grad_norm": 0.011407773941755295, "learning_rate": 4.5185185185185185e-06, "loss": 0.0008, "step": 180 }, { "epoch": 21.114285714285714, "grad_norm": 0.018033821135759354, "learning_rate": 4.1481481481481485e-06, "loss": 0.0008, "step": 190 }, { "epoch": 22.228571428571428, "grad_norm": 
0.01384884025901556, "learning_rate": 3.777777777777778e-06, "loss": 0.0007, "step": 200 }, { "epoch": 22.228571428571428, "eval_cer": 8.590953465668727, "eval_loss": 0.28691554069519043, "eval_model_preparation_time": 0.0054, "eval_runtime": 41.4927, "eval_samples_per_second": 1.47, "eval_steps_per_second": 0.386, "step": 200 }, { "epoch": 23.34285714285714, "grad_norm": 0.016528146341443062, "learning_rate": 3.4074074074074077e-06, "loss": 0.0007, "step": 210 }, { "epoch": 24.457142857142856, "grad_norm": 0.03627822548151016, "learning_rate": 3.0370370370370372e-06, "loss": 0.0006, "step": 220 }, { "epoch": 25.571428571428573, "grad_norm": 0.024156253784894943, "learning_rate": 2.666666666666667e-06, "loss": 0.0006, "step": 230 }, { "epoch": 26.685714285714287, "grad_norm": 0.02000688761472702, "learning_rate": 2.2962962962962964e-06, "loss": 0.0007, "step": 240 }, { "epoch": 27.8, "grad_norm": 0.019504278898239136, "learning_rate": 1.925925925925926e-06, "loss": 0.0005, "step": 250 }, { "epoch": 27.8, "eval_cer": 8.395704523267165, "eval_loss": 0.28907671570777893, "eval_model_preparation_time": 0.0054, "eval_runtime": 49.5194, "eval_samples_per_second": 1.232, "eval_steps_per_second": 0.323, "step": 250 }, { "epoch": 28.914285714285715, "grad_norm": 0.007301762234419584, "learning_rate": 1.5555555555555558e-06, "loss": 0.0005, "step": 260 }, { "epoch": 30.0, "grad_norm": 0.020241765305399895, "learning_rate": 1.1851851851851854e-06, "loss": 0.0006, "step": 270 }, { "epoch": 31.114285714285714, "grad_norm": 0.00909515842795372, "learning_rate": 8.14814814814815e-07, "loss": 0.0005, "step": 280 }, { "epoch": 32.22857142857143, "grad_norm": 0.01881455071270466, "learning_rate": 4.444444444444445e-07, "loss": 0.0006, "step": 290 }, { "epoch": 33.34285714285714, "grad_norm": 0.018618324771523476, "learning_rate": 7.407407407407409e-08, "loss": 0.0006, "step": 300 }, { "epoch": 33.34285714285714, "eval_cer": 8.330621542466645, "eval_loss": 0.28973376750946045, "eval_model_preparation_time": 0.0054, "eval_runtime": 41.7224, "eval_samples_per_second": 1.462, "eval_steps_per_second": 0.383, "step": 300 } ], "logging_steps": 10, "max_steps": 300, "num_input_tokens_seen": 0, "num_train_epochs": 38, "save_steps": 50, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.34711664869376e+18, "train_batch_size": 4, "trial_name": null, "trial_params": null }