|
{
  "best_metric": 0.9586206896551724,
  "best_model_checkpoint": "videomae-base-finetuned-soccer-action-recognition/checkpoint-1790",
  "epoch": 31.03125,
  "eval_steps": 500,
  "global_step": 2728,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.4652014652014653e-05,
      "loss": 1.7115,
      "step": 80
    },
    {
      "epoch": 0.03,
      "eval_accuracy": 0.4,
      "eval_loss": 1.4195746183395386,
      "eval_runtime": 150.0417,
      "eval_samples_per_second": 0.966,
      "eval_steps_per_second": 0.966,
      "step": 85
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.9304029304029305e-05,
      "loss": 1.0097,
      "step": 160
    },
    {
      "epoch": 1.03,
      "eval_accuracy": 0.6758620689655173,
      "eval_loss": 0.7807376980781555,
      "eval_runtime": 30.3768,
      "eval_samples_per_second": 4.773,
      "eval_steps_per_second": 4.773,
      "step": 170
    },
    {
      "epoch": 2.03,
      "learning_rate": 4.3956043956043955e-05,
      "loss": 0.6192,
      "step": 240
    },
    {
      "epoch": 2.03,
      "eval_accuracy": 0.7034482758620689,
      "eval_loss": 0.795186460018158,
      "eval_runtime": 30.3396,
      "eval_samples_per_second": 4.779,
      "eval_steps_per_second": 4.779,
      "step": 255
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.904276985743381e-05,
      "loss": 0.4713,
      "step": 320
    },
    {
      "epoch": 3.03,
      "eval_accuracy": 0.7931034482758621,
      "eval_loss": 0.6536410450935364,
      "eval_runtime": 28.0955,
      "eval_samples_per_second": 5.161,
      "eval_steps_per_second": 5.161,
      "step": 341
    },
    {
      "epoch": 4.02,
      "learning_rate": 4.7413441955193486e-05,
      "loss": 0.3973,
      "step": 400
    },
    {
      "epoch": 4.03,
      "eval_accuracy": 0.8689655172413793,
      "eval_loss": 0.3637762665748596,
      "eval_runtime": 28.0719,
      "eval_samples_per_second": 5.165,
      "eval_steps_per_second": 5.165,
      "step": 426
    },
    {
      "epoch": 5.02,
      "learning_rate": 4.578411405295316e-05,
      "loss": 0.3633,
      "step": 480
    },
    {
      "epoch": 5.03,
      "eval_accuracy": 0.896551724137931,
      "eval_loss": 0.36160850524902344,
      "eval_runtime": 28.2429,
      "eval_samples_per_second": 5.134,
      "eval_steps_per_second": 5.134,
      "step": 511
    },
    {
      "epoch": 6.02,
      "learning_rate": 4.415478615071284e-05,
      "loss": 0.2336,
      "step": 560
    },
    {
      "epoch": 6.03,
      "eval_accuracy": 0.896551724137931,
      "eval_loss": 0.4579198360443115,
      "eval_runtime": 30.0911,
      "eval_samples_per_second": 4.819,
      "eval_steps_per_second": 4.819,
      "step": 596
    },
    {
      "epoch": 7.02,
      "learning_rate": 4.2525458248472504e-05,
      "loss": 0.1997,
      "step": 640
    },
    {
      "epoch": 7.03,
      "eval_accuracy": 0.6068965517241379,
      "eval_loss": 1.597042202949524,
      "eval_runtime": 27.9534,
      "eval_samples_per_second": 5.187,
      "eval_steps_per_second": 5.187,
      "step": 682
    },
    {
      "epoch": 8.01,
      "learning_rate": 4.089613034623218e-05,
      "loss": 0.2738,
      "step": 720
    },
    {
      "epoch": 8.03,
      "eval_accuracy": 0.8689655172413793,
      "eval_loss": 0.4101611375808716,
      "eval_runtime": 27.8973,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 5.198,
      "step": 767
    },
    {
      "epoch": 9.01,
      "learning_rate": 3.9266802443991856e-05,
      "loss": 0.2492,
      "step": 800
    },
    {
      "epoch": 9.03,
      "eval_accuracy": 0.8344827586206897,
      "eval_loss": 0.7651154398918152,
      "eval_runtime": 29.2625,
      "eval_samples_per_second": 4.955,
      "eval_steps_per_second": 4.955,
      "step": 852
    },
    {
      "epoch": 10.01,
      "learning_rate": 3.763747454175153e-05,
      "loss": 0.1568,
      "step": 880
    },
    {
      "epoch": 10.03,
      "eval_accuracy": 0.8137931034482758,
      "eval_loss": 0.8560731410980225,
      "eval_runtime": 30.5796,
      "eval_samples_per_second": 4.742,
      "eval_steps_per_second": 4.742,
      "step": 937
    },
    {
      "epoch": 11.01,
      "learning_rate": 3.60081466395112e-05,
      "loss": 0.1856,
      "step": 960
    },
    {
      "epoch": 11.03,
      "eval_accuracy": 0.9241379310344827,
      "eval_loss": 0.2810556888580322,
      "eval_runtime": 27.9183,
      "eval_samples_per_second": 5.194,
      "eval_steps_per_second": 5.194,
      "step": 1023
    },
    {
      "epoch": 12.01,
      "learning_rate": 3.437881873727088e-05,
      "loss": 0.1296,
      "step": 1040
    },
    {
      "epoch": 12.03,
      "eval_accuracy": 0.9172413793103448,
      "eval_loss": 0.3444097340106964,
      "eval_runtime": 29.2588,
      "eval_samples_per_second": 4.956,
      "eval_steps_per_second": 4.956,
      "step": 1108
    },
    {
      "epoch": 13.0,
      "learning_rate": 3.274949083503055e-05,
      "loss": 0.0782,
      "step": 1120
    },
    {
      "epoch": 13.03,
      "eval_accuracy": 0.9241379310344827,
      "eval_loss": 0.34232720732688904,
      "eval_runtime": 29.8187,
      "eval_samples_per_second": 4.863,
      "eval_steps_per_second": 4.863,
      "step": 1193
    },
    {
      "epoch": 14.0,
      "learning_rate": 3.1120162932790225e-05,
      "loss": 0.14,
      "step": 1200
    },
    {
      "epoch": 14.03,
      "eval_accuracy": 0.9241379310344827,
      "eval_loss": 0.31215447187423706,
      "eval_runtime": 30.9503,
      "eval_samples_per_second": 4.685,
      "eval_steps_per_second": 4.685,
      "step": 1278
    },
    {
      "epoch": 15.0,
      "learning_rate": 2.94908350305499e-05,
      "loss": 0.0802,
      "step": 1280
    },
    {
      "epoch": 15.03,
      "learning_rate": 2.786150712830957e-05,
      "loss": 0.0689,
      "step": 1360
    },
    {
      "epoch": 15.03,
      "eval_accuracy": 0.9172413793103448,
      "eval_loss": 0.35335081815719604,
      "eval_runtime": 30.933,
      "eval_samples_per_second": 4.688,
      "eval_steps_per_second": 4.688,
      "step": 1364
    },
    {
      "epoch": 16.03,
      "learning_rate": 2.6232179226069247e-05,
      "loss": 0.036,
      "step": 1440
    },
    {
      "epoch": 16.03,
      "eval_accuracy": 0.9103448275862069,
      "eval_loss": 0.4814639985561371,
      "eval_runtime": 30.7939,
      "eval_samples_per_second": 4.709,
      "eval_steps_per_second": 4.709,
      "step": 1449
    },
    {
      "epoch": 17.03,
      "learning_rate": 2.4602851323828923e-05,
      "loss": 0.0695,
      "step": 1520
    },
    {
      "epoch": 17.03,
      "eval_accuracy": 0.8827586206896552,
      "eval_loss": 0.5698482394218445,
      "eval_runtime": 30.7434,
      "eval_samples_per_second": 4.716,
      "eval_steps_per_second": 4.716,
      "step": 1534
    },
    {
      "epoch": 18.02,
      "learning_rate": 2.29735234215886e-05,
      "loss": 0.0618,
      "step": 1600
    },
    {
      "epoch": 18.03,
      "eval_accuracy": 0.9310344827586207,
      "eval_loss": 0.3052562177181244,
      "eval_runtime": 29.2874,
      "eval_samples_per_second": 4.951,
      "eval_steps_per_second": 4.951,
      "step": 1619
    },
    {
      "epoch": 19.02,
      "learning_rate": 2.134419551934827e-05,
      "loss": 0.0553,
      "step": 1680
    },
    {
      "epoch": 19.03,
      "eval_accuracy": 0.9241379310344827,
      "eval_loss": 0.3443007469177246,
      "eval_runtime": 30.1383,
      "eval_samples_per_second": 4.811,
      "eval_steps_per_second": 4.811,
      "step": 1705
    },
    {
      "epoch": 20.02,
      "learning_rate": 1.9714867617107944e-05,
      "loss": 0.0301,
      "step": 1760
    },
    {
      "epoch": 20.03,
      "eval_accuracy": 0.9586206896551724,
      "eval_loss": 0.1426996886730194,
      "eval_runtime": 29.78,
      "eval_samples_per_second": 4.869,
      "eval_steps_per_second": 4.869,
      "step": 1790
    },
    {
      "epoch": 21.02,
      "learning_rate": 1.808553971486762e-05,
      "loss": 0.0412,
      "step": 1840
    },
    {
      "epoch": 21.03,
      "eval_accuracy": 0.8689655172413793,
      "eval_loss": 0.5618650317192078,
      "eval_runtime": 29.9124,
      "eval_samples_per_second": 4.847,
      "eval_steps_per_second": 4.847,
      "step": 1875
    },
    {
      "epoch": 22.02,
      "learning_rate": 1.6456211812627292e-05,
      "loss": 0.0492,
      "step": 1920
    },
    {
      "epoch": 22.03,
      "eval_accuracy": 0.8896551724137931,
      "eval_loss": 0.5700664520263672,
      "eval_runtime": 27.7486,
      "eval_samples_per_second": 5.225,
      "eval_steps_per_second": 5.225,
      "step": 1960
    },
    {
      "epoch": 23.01,
      "learning_rate": 1.4826883910386965e-05,
      "loss": 0.0171,
      "step": 2000
    },
    {
      "epoch": 23.03,
      "eval_accuracy": 0.8689655172413793,
      "eval_loss": 0.6377372145652771,
      "eval_runtime": 29.1792,
      "eval_samples_per_second": 4.969,
      "eval_steps_per_second": 4.969,
      "step": 2046
    },
    {
      "epoch": 24.01,
      "learning_rate": 1.3197556008146641e-05,
      "loss": 0.0181,
      "step": 2080
    },
    {
      "epoch": 24.03,
      "eval_accuracy": 0.8827586206896552,
      "eval_loss": 0.5981259346008301,
      "eval_runtime": 30.3638,
      "eval_samples_per_second": 4.775,
      "eval_steps_per_second": 4.775,
      "step": 2131
    },
    {
      "epoch": 25.01,
      "learning_rate": 1.1568228105906315e-05,
      "loss": 0.0305,
      "step": 2160
    },
    {
      "epoch": 25.03,
      "eval_accuracy": 0.9448275862068966,
      "eval_loss": 0.3177809715270996,
      "eval_runtime": 30.2408,
      "eval_samples_per_second": 4.795,
      "eval_steps_per_second": 4.795,
      "step": 2216
    },
    {
      "epoch": 26.01,
      "learning_rate": 9.938900203665988e-06,
      "loss": 0.0393,
      "step": 2240
    },
    {
      "epoch": 26.03,
      "eval_accuracy": 0.9103448275862069,
      "eval_loss": 0.5434169173240662,
      "eval_runtime": 29.452,
      "eval_samples_per_second": 4.923,
      "eval_steps_per_second": 4.923,
      "step": 2301
    },
    {
      "epoch": 27.01,
      "learning_rate": 8.309572301425662e-06,
      "loss": 0.0248,
      "step": 2320
    },
    {
      "epoch": 27.03,
      "eval_accuracy": 0.9241379310344827,
      "eval_loss": 0.40973013639450073,
      "eval_runtime": 30.9246,
      "eval_samples_per_second": 4.689,
      "eval_steps_per_second": 4.689,
      "step": 2387
    },
    {
      "epoch": 28.0,
      "learning_rate": 6.6802443991853366e-06,
      "loss": 0.0146,
      "step": 2400
    },
    {
      "epoch": 28.03,
      "eval_accuracy": 0.9103448275862069,
      "eval_loss": 0.44273480772972107,
      "eval_runtime": 29.0928,
      "eval_samples_per_second": 4.984,
      "eval_steps_per_second": 4.984,
      "step": 2472
    },
    {
      "epoch": 29.0,
      "learning_rate": 5.05091649694501e-06,
      "loss": 0.012,
      "step": 2480
    },
    {
      "epoch": 29.03,
      "eval_accuracy": 0.903448275862069,
      "eval_loss": 0.5618546605110168,
      "eval_runtime": 29.5423,
      "eval_samples_per_second": 4.908,
      "eval_steps_per_second": 4.908,
      "step": 2557
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.4215885947046847e-06,
      "loss": 0.0036,
      "step": 2560
    },
    {
      "epoch": 30.03,
      "learning_rate": 1.7922606924643586e-06,
      "loss": 0.0065,
      "step": 2640
    },
    {
      "epoch": 30.03,
      "eval_accuracy": 0.9103448275862069,
      "eval_loss": 0.5383774042129517,
      "eval_runtime": 30.5686,
      "eval_samples_per_second": 4.743,
      "eval_steps_per_second": 4.743,
      "step": 2642
    },
    {
      "epoch": 31.03,
      "learning_rate": 1.629327902240326e-07,
      "loss": 0.009,
      "step": 2720
    },
    {
      "epoch": 31.03,
      "eval_accuracy": 0.9172413793103448,
      "eval_loss": 0.5013585686683655,
      "eval_runtime": 30.4753,
      "eval_samples_per_second": 4.758,
      "eval_steps_per_second": 4.758,
      "step": 2728
    },
    {
      "epoch": 31.03,
      "step": 2728,
      "total_flos": 2.719508368433401e+19,
      "train_loss": 0.20195167288498247,
      "train_runtime": 11757.4892,
      "train_samples_per_second": 1.856,
      "train_steps_per_second": 0.232
    },
    {
      "epoch": 31.03,
      "eval_accuracy": 0.9470198675496688,
      "eval_loss": 0.2553568482398987,
      "eval_runtime": 163.4914,
      "eval_samples_per_second": 0.924,
      "eval_steps_per_second": 0.924,
      "step": 2728
    },
    {
      "epoch": 31.03,
      "eval_accuracy": 0.9470198675496688,
      "eval_loss": 0.2553568482398987,
      "eval_runtime": 31.773,
      "eval_samples_per_second": 4.752,
      "eval_steps_per_second": 4.752,
      "step": 2728
    }
  ],
  "logging_steps": 80,
  "max_steps": 2728,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 2.719508368433401e+19,
  "trial_name": null,
  "trial_params": null
}
|
|