{
  "best_metric": 0.049842845648527145,
  "best_model_checkpoint": "./trained_models/Llama-3.1-8B-Instruct-CNADFTD-ADNI2NIFD-AN-fold-0-gathered-equally-represented-0234-v2/checkpoint-65",
  "epoch": 7.2025316455696204,
  "eval_steps": 5,
  "global_step": 65,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20253164556962025,
      "grad_norm": 20.267499923706055,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 1.7395,
      "step": 2
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 19.267681121826172,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 1.5413,
      "step": 4
    },
    {
      "epoch": 0.5063291139240507,
      "eval_loss": 0.1354750096797943,
      "eval_runtime": 10.1119,
      "eval_samples_per_second": 8.9,
      "eval_steps_per_second": 2.275,
      "step": 5
    },
    {
      "epoch": 0.6075949367088608,
      "grad_norm": 9.370927810668945,
      "learning_rate": 8.999999999999999e-05,
      "loss": 0.928,
      "step": 6
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 14.292084693908691,
      "learning_rate": 0.00011999999999999999,
      "loss": 1.0332,
      "step": 8
    },
    {
      "epoch": 1.1012658227848102,
      "grad_norm": 16.358558654785156,
      "learning_rate": 0.00015,
      "loss": 0.7069,
      "step": 10
    },
    {
      "epoch": 1.1012658227848102,
      "eval_loss": 0.09530380368232727,
      "eval_runtime": 10.229,
      "eval_samples_per_second": 8.799,
      "eval_steps_per_second": 2.249,
      "step": 10
    },
    {
      "epoch": 1.3037974683544304,
      "grad_norm": 4.635617733001709,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.7555,
      "step": 12
    },
    {
      "epoch": 1.5063291139240507,
      "grad_norm": 5.011940002441406,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.4148,
      "step": 14
    },
    {
      "epoch": 1.6075949367088609,
      "eval_loss": 0.07413829118013382,
      "eval_runtime": 10.2852,
      "eval_samples_per_second": 8.75,
      "eval_steps_per_second": 2.236,
      "step": 15
    },
    {
      "epoch": 1.7088607594936709,
      "grad_norm": 3.0944323539733887,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.4899,
      "step": 16
    },
    {
      "epoch": 1.9113924050632911,
      "grad_norm": 9.65760612487793,
      "learning_rate": 0.00027,
      "loss": 0.7349,
      "step": 18
    },
    {
      "epoch": 2.2025316455696204,
      "grad_norm": 6.6734418869018555,
      "learning_rate": 0.0003,
      "loss": 0.7657,
      "step": 20
    },
    {
      "epoch": 2.2025316455696204,
      "eval_loss": 0.06096666678786278,
      "eval_runtime": 10.2992,
      "eval_samples_per_second": 8.739,
      "eval_steps_per_second": 2.233,
      "step": 20
    },
    {
      "epoch": 2.4050632911392404,
      "grad_norm": 4.764143943786621,
      "learning_rate": 0.00029939614409928584,
      "loss": 0.5037,
      "step": 22
    },
    {
      "epoch": 2.607594936708861,
      "grad_norm": 2.2680883407592773,
      "learning_rate": 0.00029758943828979444,
      "loss": 0.4673,
      "step": 24
    },
    {
      "epoch": 2.708860759493671,
      "eval_loss": 0.05865984410047531,
      "eval_runtime": 10.3046,
      "eval_samples_per_second": 8.734,
      "eval_steps_per_second": 2.232,
      "step": 25
    },
    {
      "epoch": 2.810126582278481,
      "grad_norm": 4.016191005706787,
      "learning_rate": 0.00029459442910437797,
      "loss": 0.4189,
      "step": 26
    },
    {
      "epoch": 3.1012658227848102,
      "grad_norm": 6.397407531738281,
      "learning_rate": 0.00029043523059596053,
      "loss": 0.6435,
      "step": 28
    },
    {
      "epoch": 3.3037974683544302,
      "grad_norm": 5.727280139923096,
      "learning_rate": 0.0002851453301853628,
      "loss": 0.3936,
      "step": 30
    },
    {
      "epoch": 3.3037974683544302,
      "eval_loss": 0.06180719658732414,
      "eval_runtime": 10.3031,
      "eval_samples_per_second": 8.735,
      "eval_steps_per_second": 2.232,
      "step": 30
    },
    {
      "epoch": 3.5063291139240507,
      "grad_norm": 4.997829437255859,
      "learning_rate": 0.0002787673190402799,
      "loss": 0.3523,
      "step": 32
    },
    {
      "epoch": 3.708860759493671,
      "grad_norm": 4.903127670288086,
      "learning_rate": 0.0002713525491562421,
      "loss": 0.5887,
      "step": 34
    },
    {
      "epoch": 3.810126582278481,
      "eval_loss": 0.05977346748113632,
      "eval_runtime": 10.2946,
      "eval_samples_per_second": 8.742,
      "eval_steps_per_second": 2.234,
      "step": 35
    },
    {
      "epoch": 3.911392405063291,
      "grad_norm": 6.872932434082031,
      "learning_rate": 0.00026296071990054165,
      "loss": 0.4239,
      "step": 36
    },
    {
      "epoch": 4.2025316455696204,
      "grad_norm": 3.7034521102905273,
      "learning_rate": 0.0002536593973480297,
      "loss": 0.437,
      "step": 38
    },
    {
      "epoch": 4.405063291139241,
      "grad_norm": 4.1444244384765625,
      "learning_rate": 0.00024352347027881003,
      "loss": 0.2975,
      "step": 40
    },
    {
      "epoch": 4.405063291139241,
      "eval_loss": 0.06828329712152481,
      "eval_runtime": 10.2997,
      "eval_samples_per_second": 8.738,
      "eval_steps_per_second": 2.233,
      "step": 40
    },
    {
      "epoch": 4.6075949367088604,
      "grad_norm": 4.268646717071533,
      "learning_rate": 0.00023263454721781537,
      "loss": 0.2971,
      "step": 42
    },
    {
      "epoch": 4.810126582278481,
      "grad_norm": 9.77409839630127,
      "learning_rate": 0.0002210802993709498,
      "loss": 0.3884,
      "step": 44
    },
    {
      "epoch": 4.911392405063291,
      "eval_loss": 0.0632724016904831,
      "eval_runtime": 10.2951,
      "eval_samples_per_second": 8.742,
      "eval_steps_per_second": 2.234,
      "step": 45
    },
    {
      "epoch": 5.10126582278481,
      "grad_norm": 12.521153450012207,
      "learning_rate": 0.00020895375474808852,
      "loss": 0.634,
      "step": 46
    },
    {
      "epoch": 5.30379746835443,
      "grad_norm": 7.4212541580200195,
      "learning_rate": 0.0001963525491562421,
      "loss": 0.3246,
      "step": 48
    },
    {
      "epoch": 5.506329113924051,
      "grad_norm": 10.269664764404297,
      "learning_rate": 0.00018337814009344714,
      "loss": 0.4664,
      "step": 50
    },
    {
      "epoch": 5.506329113924051,
      "eval_loss": 0.09271334856748581,
      "eval_runtime": 10.2899,
      "eval_samples_per_second": 8.746,
      "eval_steps_per_second": 2.235,
      "step": 50
    },
    {
      "epoch": 5.708860759493671,
      "grad_norm": 6.81392765045166,
      "learning_rate": 0.00017013498987264832,
      "loss": 0.3999,
      "step": 52
    },
    {
      "epoch": 5.911392405063291,
      "grad_norm": 3.8285505771636963,
      "learning_rate": 0.00015672972455257723,
      "loss": 0.2666,
      "step": 54
    },
    {
      "epoch": 6.10126582278481,
      "eval_loss": 0.05789216235280037,
      "eval_runtime": 10.2896,
      "eval_samples_per_second": 8.747,
      "eval_steps_per_second": 2.235,
      "step": 55
    },
    {
      "epoch": 6.2025316455696204,
      "grad_norm": 3.7714805603027344,
      "learning_rate": 0.0001432702754474228,
      "loss": 0.3953,
      "step": 56
    },
    {
      "epoch": 6.405063291139241,
      "grad_norm": 2.9867966175079346,
      "learning_rate": 0.0001298650101273517,
      "loss": 0.227,
      "step": 58
    },
    {
      "epoch": 6.6075949367088604,
      "grad_norm": 4.1205010414123535,
      "learning_rate": 0.00011662185990655284,
      "loss": 0.2554,
      "step": 60
    },
    {
      "epoch": 6.6075949367088604,
      "eval_loss": 0.061419058591127396,
      "eval_runtime": 10.27,
      "eval_samples_per_second": 8.763,
      "eval_steps_per_second": 2.24,
      "step": 60
    },
    {
      "epoch": 6.810126582278481,
      "grad_norm": 2.6789913177490234,
      "learning_rate": 0.0001036474508437579,
      "loss": 0.2873,
      "step": 62
    },
    {
      "epoch": 7.10126582278481,
      "grad_norm": 4.4857683181762695,
      "learning_rate": 9.104624525191145e-05,
      "loss": 0.2828,
      "step": 64
    },
    {
      "epoch": 7.2025316455696204,
      "eval_loss": 0.049842845648527145,
      "eval_runtime": 10.2776,
      "eval_samples_per_second": 8.757,
      "eval_steps_per_second": 2.238,
      "step": 65
    }
  ],
  "logging_steps": 2,
  "max_steps": 90,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.111682980043162e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}