{ "best_global_step": 600, "best_metric": 0.6628760695457458, "best_model_checkpoint": "/scratch/skscla001/experiments/datasets/results/whisper-medium-bigcgen-male-5hrs-62/checkpoint-600", "epoch": 4.3478260869565215, "eval_steps": 200, "global_step": 1400, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.07763975155279502, "grad_norm": 33.341270446777344, "learning_rate": 4.0000000000000003e-07, "loss": 3.3362, "step": 25 }, { "epoch": 0.15527950310559005, "grad_norm": 26.828624725341797, "learning_rate": 9.000000000000001e-07, "loss": 2.6418, "step": 50 }, { "epoch": 0.2329192546583851, "grad_norm": 25.673059463500977, "learning_rate": 1.4000000000000001e-06, "loss": 2.017, "step": 75 }, { "epoch": 0.3105590062111801, "grad_norm": 17.314512252807617, "learning_rate": 1.9000000000000002e-06, "loss": 1.4498, "step": 100 }, { "epoch": 0.38819875776397517, "grad_norm": 17.275650024414062, "learning_rate": 2.4000000000000003e-06, "loss": 1.2023, "step": 125 }, { "epoch": 0.4658385093167702, "grad_norm": 17.702608108520508, "learning_rate": 2.9e-06, "loss": 1.0835, "step": 150 }, { "epoch": 0.5434782608695652, "grad_norm": 15.095105171203613, "learning_rate": 3.4000000000000005e-06, "loss": 1.1593, "step": 175 }, { "epoch": 0.6211180124223602, "grad_norm": 16.568078994750977, "learning_rate": 3.900000000000001e-06, "loss": 0.956, "step": 200 }, { "epoch": 0.6211180124223602, "eval_loss": 0.8870997428894043, "eval_runtime": 276.5554, "eval_samples_per_second": 1.595, "eval_steps_per_second": 0.799, "eval_wer": 0.6443856696321231, "step": 200 }, { "epoch": 0.6987577639751553, "grad_norm": 17.126779556274414, "learning_rate": 4.4e-06, "loss": 1.0133, "step": 225 }, { "epoch": 0.7763975155279503, "grad_norm": 15.565762519836426, "learning_rate": 4.9000000000000005e-06, "loss": 1.0108, "step": 250 }, { "epoch": 0.8540372670807453, "grad_norm": 19.874563217163086, "learning_rate": 5.400000000000001e-06, "loss": 0.792, "step": 275 }, { "epoch": 0.9316770186335404, "grad_norm": 13.371776580810547, "learning_rate": 5.9e-06, "loss": 0.7788, "step": 300 }, { "epoch": 1.0093167701863355, "grad_norm": 13.305375099182129, "learning_rate": 6.4000000000000006e-06, "loss": 0.8068, "step": 325 }, { "epoch": 1.0869565217391304, "grad_norm": 16.924894332885742, "learning_rate": 6.9e-06, "loss": 0.5933, "step": 350 }, { "epoch": 1.1645962732919255, "grad_norm": 13.33420467376709, "learning_rate": 7.4e-06, "loss": 0.7016, "step": 375 }, { "epoch": 1.2422360248447206, "grad_norm": 15.580875396728516, "learning_rate": 7.9e-06, "loss": 0.6779, "step": 400 }, { "epoch": 1.2422360248447206, "eval_loss": 0.7327317595481873, "eval_runtime": 265.8594, "eval_samples_per_second": 1.659, "eval_steps_per_second": 0.831, "eval_wer": 0.5585477278191873, "step": 400 }, { "epoch": 1.3198757763975155, "grad_norm": 14.909109115600586, "learning_rate": 8.400000000000001e-06, "loss": 0.6421, "step": 425 }, { "epoch": 1.3975155279503104, "grad_norm": 15.768739700317383, "learning_rate": 8.900000000000001e-06, "loss": 0.6492, "step": 450 }, { "epoch": 1.4751552795031055, "grad_norm": 10.104466438293457, "learning_rate": 9.4e-06, "loss": 0.6439, "step": 475 }, { "epoch": 1.5527950310559007, "grad_norm": 12.118277549743652, "learning_rate": 9.9e-06, "loss": 0.6893, "step": 500 }, { "epoch": 1.6304347826086958, "grad_norm": 11.13495922088623, "learning_rate": 9.955555555555556e-06, "loss": 0.6307, "step": 525 }, { "epoch": 1.7080745341614907, "grad_norm": 
12.645089149475098, "learning_rate": 9.9e-06, "loss": 0.603, "step": 550 }, { "epoch": 1.7857142857142856, "grad_norm": 17.46633529663086, "learning_rate": 9.844444444444446e-06, "loss": 0.657, "step": 575 }, { "epoch": 1.8633540372670807, "grad_norm": 8.319685935974121, "learning_rate": 9.78888888888889e-06, "loss": 0.5879, "step": 600 }, { "epoch": 1.8633540372670807, "eval_loss": 0.6628760695457458, "eval_runtime": 270.2431, "eval_samples_per_second": 1.632, "eval_steps_per_second": 0.818, "eval_wer": 0.516229862947824, "step": 600 }, { "epoch": 1.9409937888198758, "grad_norm": 11.236241340637207, "learning_rate": 9.733333333333334e-06, "loss": 0.6359, "step": 625 }, { "epoch": 2.018633540372671, "grad_norm": 13.337384223937988, "learning_rate": 9.677777777777778e-06, "loss": 0.5992, "step": 650 }, { "epoch": 2.0962732919254656, "grad_norm": 8.325053215026855, "learning_rate": 9.622222222222222e-06, "loss": 0.3897, "step": 675 }, { "epoch": 2.1739130434782608, "grad_norm": 12.123583793640137, "learning_rate": 9.566666666666668e-06, "loss": 0.4313, "step": 700 }, { "epoch": 2.251552795031056, "grad_norm": 13.106657028198242, "learning_rate": 9.511111111111112e-06, "loss": 0.3598, "step": 725 }, { "epoch": 2.329192546583851, "grad_norm": 8.855884552001953, "learning_rate": 9.455555555555557e-06, "loss": 0.4473, "step": 750 }, { "epoch": 2.406832298136646, "grad_norm": 8.3619966506958, "learning_rate": 9.4e-06, "loss": 0.3956, "step": 775 }, { "epoch": 2.4844720496894412, "grad_norm": 9.356673240661621, "learning_rate": 9.344444444444446e-06, "loss": 0.4268, "step": 800 }, { "epoch": 2.4844720496894412, "eval_loss": 0.6734069585800171, "eval_runtime": 275.7178, "eval_samples_per_second": 1.599, "eval_steps_per_second": 0.802, "eval_wer": 0.5006011060351045, "step": 800 }, { "epoch": 2.562111801242236, "grad_norm": 6.666379928588867, "learning_rate": 9.28888888888889e-06, "loss": 0.389, "step": 825 }, { "epoch": 2.639751552795031, "grad_norm": 11.296941757202148, "learning_rate": 9.233333333333334e-06, "loss": 0.3558, "step": 850 }, { "epoch": 2.717391304347826, "grad_norm": 14.280022621154785, "learning_rate": 9.17777777777778e-06, "loss": 0.4083, "step": 875 }, { "epoch": 2.795031055900621, "grad_norm": 11.778422355651855, "learning_rate": 9.122222222222223e-06, "loss": 0.4209, "step": 900 }, { "epoch": 2.8726708074534164, "grad_norm": 8.68419075012207, "learning_rate": 9.066666666666667e-06, "loss": 0.3953, "step": 925 }, { "epoch": 2.950310559006211, "grad_norm": 15.859999656677246, "learning_rate": 9.011111111111111e-06, "loss": 0.3805, "step": 950 }, { "epoch": 3.027950310559006, "grad_norm": 6.30320930480957, "learning_rate": 8.955555555555555e-06, "loss": 0.3059, "step": 975 }, { "epoch": 3.1055900621118013, "grad_norm": 6.796067714691162, "learning_rate": 8.900000000000001e-06, "loss": 0.1979, "step": 1000 }, { "epoch": 3.1055900621118013, "eval_loss": 0.7146615982055664, "eval_runtime": 265.096, "eval_samples_per_second": 1.664, "eval_steps_per_second": 0.834, "eval_wer": 0.4765568646309209, "step": 1000 }, { "epoch": 3.1832298136645965, "grad_norm": 6.765524864196777, "learning_rate": 8.844444444444445e-06, "loss": 0.2458, "step": 1025 }, { "epoch": 3.260869565217391, "grad_norm": 6.841522693634033, "learning_rate": 8.788888888888891e-06, "loss": 0.2144, "step": 1050 }, { "epoch": 3.3385093167701863, "grad_norm": 7.854112148284912, "learning_rate": 8.733333333333333e-06, "loss": 0.2255, "step": 1075 }, { "epoch": 3.4161490683229814, "grad_norm": 6.754302501678467, 
"learning_rate": 8.677777777777779e-06, "loss": 0.2295, "step": 1100 }, { "epoch": 3.4937888198757765, "grad_norm": 10.24082088470459, "learning_rate": 8.622222222222223e-06, "loss": 0.214, "step": 1125 }, { "epoch": 3.571428571428571, "grad_norm": 8.713047981262207, "learning_rate": 8.566666666666667e-06, "loss": 0.2529, "step": 1150 }, { "epoch": 3.6490683229813663, "grad_norm": 10.281318664550781, "learning_rate": 8.511111111111113e-06, "loss": 0.2617, "step": 1175 }, { "epoch": 3.7267080745341614, "grad_norm": 9.869316101074219, "learning_rate": 8.455555555555555e-06, "loss": 0.2338, "step": 1200 }, { "epoch": 3.7267080745341614, "eval_loss": 0.7034153938293457, "eval_runtime": 264.3393, "eval_samples_per_second": 1.668, "eval_steps_per_second": 0.836, "eval_wer": 0.48689588843471987, "step": 1200 }, { "epoch": 3.8043478260869565, "grad_norm": 4.6300835609436035, "learning_rate": 8.400000000000001e-06, "loss": 0.2299, "step": 1225 }, { "epoch": 3.8819875776397517, "grad_norm": 6.275107383728027, "learning_rate": 8.344444444444445e-06, "loss": 0.1803, "step": 1250 }, { "epoch": 3.9596273291925463, "grad_norm": 7.057643413543701, "learning_rate": 8.288888888888889e-06, "loss": 0.2024, "step": 1275 }, { "epoch": 4.037267080745342, "grad_norm": 7.044947147369385, "learning_rate": 8.233333333333335e-06, "loss": 0.1694, "step": 1300 }, { "epoch": 4.114906832298137, "grad_norm": 5.649941921234131, "learning_rate": 8.177777777777779e-06, "loss": 0.1026, "step": 1325 }, { "epoch": 4.192546583850931, "grad_norm": 6.779381275177002, "learning_rate": 8.122222222222223e-06, "loss": 0.1207, "step": 1350 }, { "epoch": 4.270186335403727, "grad_norm": 4.534013271331787, "learning_rate": 8.066666666666667e-06, "loss": 0.081, "step": 1375 }, { "epoch": 4.3478260869565215, "grad_norm": 10.306833267211914, "learning_rate": 8.011111111111113e-06, "loss": 0.1014, "step": 1400 }, { "epoch": 4.3478260869565215, "eval_loss": 0.7553574442863464, "eval_runtime": 263.3312, "eval_samples_per_second": 1.675, "eval_steps_per_second": 0.839, "eval_wer": 0.4878576580908872, "step": 1400 }, { "epoch": 4.3478260869565215, "step": 1400, "total_flos": 1.1430774964224e+19, "train_loss": 0.6237195907320295, "train_runtime": 4683.4552, "train_samples_per_second": 8.541, "train_steps_per_second": 1.068 } ], "logging_steps": 25, "max_steps": 5000, "num_input_tokens_seen": 0, "num_train_epochs": 16, "save_steps": 200, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 4, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 4 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.1430774964224e+19, "train_batch_size": 2, "trial_name": null, "trial_params": null }