|
{
  "best_metric": 1.362037181854248,
  "best_model_checkpoint": "data/Gemma-2-2B_task-1_60-samples_config-1/checkpoint-34",
  "epoch": 12.869565217391305,
  "eval_steps": 500,
  "global_step": 74,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 2.9326045513153076,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.4121,
      "step": 1
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 2.785604238510132,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.5463,
      "step": 2
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 2.592984914779663,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.489,
      "step": 4
    },
    {
      "epoch": 0.8695652173913043,
      "eval_loss": 2.2686474323272705,
      "eval_runtime": 1.0904,
      "eval_samples_per_second": 11.005,
      "eval_steps_per_second": 11.005,
      "step": 5
    },
    {
      "epoch": 1.0434782608695652,
      "grad_norm": 2.600557804107666,
      "learning_rate": 2.4e-05,
      "loss": 2.3738,
      "step": 6
    },
    {
      "epoch": 1.391304347826087,
      "grad_norm": 2.8645849227905273,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 2.3608,
      "step": 8
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 2.449917793273926,
      "learning_rate": 4e-05,
      "loss": 2.1234,
      "step": 10
    },
    {
      "epoch": 1.9130434782608696,
      "eval_loss": 1.8447164297103882,
      "eval_runtime": 1.0662,
      "eval_samples_per_second": 11.255,
      "eval_steps_per_second": 11.255,
      "step": 11
    },
    {
      "epoch": 2.0869565217391304,
      "grad_norm": 2.242774486541748,
      "learning_rate": 4.8e-05,
      "loss": 1.9407,
      "step": 12
    },
    {
      "epoch": 2.4347826086956523,
      "grad_norm": 1.8629240989685059,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 1.7363,
      "step": 14
    },
    {
      "epoch": 2.782608695652174,
      "grad_norm": 1.4939218759536743,
      "learning_rate": 6.400000000000001e-05,
      "loss": 1.5886,
      "step": 16
    },
    {
      "epoch": 2.9565217391304346,
      "eval_loss": 1.5018290281295776,
      "eval_runtime": 1.2074,
      "eval_samples_per_second": 9.939,
      "eval_steps_per_second": 9.939,
      "step": 17
    },
    {
      "epoch": 3.130434782608696,
      "grad_norm": 1.2370601892471313,
      "learning_rate": 7.2e-05,
      "loss": 1.471,
      "step": 18
    },
    {
      "epoch": 3.4782608695652173,
      "grad_norm": 1.2678393125534058,
      "learning_rate": 8e-05,
      "loss": 1.3431,
      "step": 20
    },
    {
      "epoch": 3.8260869565217392,
      "grad_norm": 1.1590300798416138,
      "learning_rate": 8.800000000000001e-05,
      "loss": 1.4359,
      "step": 22
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.3991178274154663,
      "eval_runtime": 1.196,
      "eval_samples_per_second": 10.033,
      "eval_steps_per_second": 10.033,
      "step": 23
    },
    {
      "epoch": 4.173913043478261,
      "grad_norm": 1.0870388746261597,
      "learning_rate": 9.6e-05,
      "loss": 1.2965,
      "step": 24
    },
    {
      "epoch": 4.521739130434782,
      "grad_norm": 0.9513787031173706,
      "learning_rate": 9.999512620046522e-05,
      "loss": 1.216,
      "step": 26
    },
    {
      "epoch": 4.869565217391305,
      "grad_norm": 0.9963628649711609,
      "learning_rate": 9.995614150494293e-05,
      "loss": 1.1595,
      "step": 28
    },
    {
      "epoch": 4.869565217391305,
      "eval_loss": 1.3781918287277222,
      "eval_runtime": 1.2257,
      "eval_samples_per_second": 9.79,
      "eval_steps_per_second": 9.79,
      "step": 28
    },
    {
      "epoch": 5.217391304347826,
      "grad_norm": 1.009848952293396,
      "learning_rate": 9.987820251299122e-05,
      "loss": 1.1039,
      "step": 30
    },
    {
      "epoch": 5.565217391304348,
      "grad_norm": 1.2583975791931152,
      "learning_rate": 9.976136999909156e-05,
      "loss": 1.0074,
      "step": 32
    },
    {
      "epoch": 5.913043478260869,
      "grad_norm": 1.1578857898712158,
      "learning_rate": 9.96057350657239e-05,
      "loss": 0.9043,
      "step": 34
    },
    {
      "epoch": 5.913043478260869,
      "eval_loss": 1.362037181854248,
      "eval_runtime": 1.0954,
      "eval_samples_per_second": 10.955,
      "eval_steps_per_second": 10.955,
      "step": 34
    },
    {
      "epoch": 6.260869565217392,
      "grad_norm": 1.0541073083877563,
      "learning_rate": 9.941141907232765e-05,
      "loss": 0.7793,
      "step": 36
    },
    {
      "epoch": 6.608695652173913,
      "grad_norm": 1.4098272323608398,
      "learning_rate": 9.917857354066931e-05,
      "loss": 0.7546,
      "step": 38
    },
    {
      "epoch": 6.956521739130435,
      "grad_norm": 1.4439623355865479,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.773,
      "step": 40
    },
    {
      "epoch": 6.956521739130435,
      "eval_loss": 1.454060673713684,
      "eval_runtime": 1.2372,
      "eval_samples_per_second": 9.699,
      "eval_steps_per_second": 9.699,
      "step": 40
    },
    {
      "epoch": 7.304347826086957,
      "grad_norm": 1.3048129081726074,
      "learning_rate": 9.859805002892732e-05,
      "loss": 0.6603,
      "step": 42
    },
    {
      "epoch": 7.6521739130434785,
      "grad_norm": 1.3808172941207886,
      "learning_rate": 9.825082472361557e-05,
      "loss": 0.5411,
      "step": 44
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.6105766296386719,
      "learning_rate": 9.786597487660337e-05,
      "loss": 0.4933,
      "step": 46
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.616339087486267,
      "eval_runtime": 1.0573,
      "eval_samples_per_second": 11.349,
      "eval_steps_per_second": 11.349,
      "step": 46
    },
    {
      "epoch": 8.347826086956522,
      "grad_norm": 1.46830153465271,
      "learning_rate": 9.744380058222483e-05,
      "loss": 0.3801,
      "step": 48
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 1.603589653968811,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.354,
      "step": 50
    },
    {
      "epoch": 8.869565217391305,
      "eval_loss": 1.9201489686965942,
      "eval_runtime": 1.0601,
      "eval_samples_per_second": 11.32,
      "eval_steps_per_second": 11.32,
      "step": 51
    },
    {
      "epoch": 9.043478260869565,
      "grad_norm": 1.9514093399047852,
      "learning_rate": 9.648882429441257e-05,
      "loss": 0.3032,
      "step": 52
    },
    {
      "epoch": 9.391304347826088,
      "grad_norm": 1.9017702341079712,
      "learning_rate": 9.595676696276172e-05,
      "loss": 0.2081,
      "step": 54
    },
    {
      "epoch": 9.73913043478261,
      "grad_norm": 1.7318685054779053,
      "learning_rate": 9.538887392664544e-05,
      "loss": 0.1478,
      "step": 56
    },
    {
      "epoch": 9.91304347826087,
      "eval_loss": 2.480901002883911,
      "eval_runtime": 1.1179,
      "eval_samples_per_second": 10.734,
      "eval_steps_per_second": 10.734,
      "step": 57
    },
    {
      "epoch": 10.08695652173913,
      "grad_norm": 1.9642924070358276,
      "learning_rate": 9.478558801197065e-05,
      "loss": 0.1671,
      "step": 58
    },
    {
      "epoch": 10.434782608695652,
      "grad_norm": 1.2309162616729736,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0836,
      "step": 60
    },
    {
      "epoch": 10.782608695652174,
      "grad_norm": 1.401197910308838,
      "learning_rate": 9.347474647526095e-05,
      "loss": 0.0676,
      "step": 62
    },
    {
      "epoch": 10.956521739130435,
      "eval_loss": 2.9183950424194336,
      "eval_runtime": 1.0831,
      "eval_samples_per_second": 11.079,
      "eval_steps_per_second": 11.079,
      "step": 63
    },
    {
      "epoch": 11.130434782608695,
      "grad_norm": 1.3135957717895508,
      "learning_rate": 9.276821300802534e-05,
      "loss": 0.0935,
      "step": 64
    },
    {
      "epoch": 11.478260869565217,
      "grad_norm": 1.3647031784057617,
      "learning_rate": 9.202833017478422e-05,
      "loss": 0.0463,
      "step": 66
    },
    {
      "epoch": 11.826086956521738,
      "grad_norm": 0.8959400653839111,
      "learning_rate": 9.125567491391476e-05,
      "loss": 0.0308,
      "step": 68
    },
    {
      "epoch": 12.0,
      "eval_loss": 3.2972118854522705,
      "eval_runtime": 1.2332,
      "eval_samples_per_second": 9.731,
      "eval_steps_per_second": 9.731,
      "step": 69
    },
    {
      "epoch": 12.173913043478262,
      "grad_norm": 2.00899338722229,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0286,
      "step": 70
    },
    {
      "epoch": 12.521739130434783,
      "grad_norm": 2.88161563873291,
      "learning_rate": 8.961448216775954e-05,
      "loss": 0.0286,
      "step": 72
    },
    {
      "epoch": 12.869565217391305,
      "grad_norm": 1.5414081811904907,
      "learning_rate": 8.874722443520899e-05,
      "loss": 0.0342,
      "step": 74
    },
    {
      "epoch": 12.869565217391305,
      "eval_loss": 3.367413282394409,
      "eval_runtime": 1.1245,
      "eval_samples_per_second": 10.672,
      "eval_steps_per_second": 10.672,
      "step": 74
    },
    {
      "epoch": 12.869565217391305,
      "step": 74,
      "total_flos": 2319598189281280.0,
      "train_loss": 0.9195622371862063,
      "train_runtime": 286.9716,
      "train_samples_per_second": 8.015,
      "train_steps_per_second": 0.871
    }
  ],
  "logging_steps": 2,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2319598189281280.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|