{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.5,
  "eval_steps": 500,
  "global_step": 175,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 5.114527702331543,
      "learning_rate": 0.00019998209226697376,
      "loss": 0.7202,
      "step": 10
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 3.297234296798706,
      "learning_rate": 0.00019784091409455728,
      "loss": 0.3157,
      "step": 20
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 1.1975072622299194,
      "learning_rate": 0.00019220586030376134,
      "loss": 0.216,
      "step": 30
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.313912034034729,
      "learning_rate": 0.00018327815731637612,
      "loss": 0.1807,
      "step": 40
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.9035260677337646,
      "learning_rate": 0.0001713766112687139,
      "loss": 0.1576,
      "step": 50
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 1.169536828994751,
      "learning_rate": 0.00015692622352080662,
      "loss": 0.1362,
      "step": 60
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.7937868237495422,
      "learning_rate": 0.0001404430139595877,
      "loss": 0.1172,
      "step": 70
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.6412622928619385,
      "learning_rate": 0.00012251559405226941,
      "loss": 0.1065,
      "step": 80
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.6135754585266113,
      "learning_rate": 0.00010378414767176705,
      "loss": 0.0952,
      "step": 90
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.6013264060020447,
      "learning_rate": 8.491757028386263e-05,
      "loss": 0.1005,
      "step": 100
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.7380222082138062,
      "learning_rate": 6.658958285026102e-05,
      "loss": 0.0794,
      "step": 110
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.42198696732521057,
      "learning_rate": 4.945467341434195e-05,
      "loss": 0.1008,
      "step": 120
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.3692607879638672,
      "learning_rate": 3.4124725489820645e-05,
      "loss": 0.0545,
      "step": 130
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.5357337594032288,
      "learning_rate": 2.1147167846963422e-05,
      "loss": 0.0585,
      "step": 140
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.45641225576400757,
      "learning_rate": 1.0985425962260343e-05,
      "loss": 0.0608,
      "step": 150
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.21990008652210236,
      "learning_rate": 4.002373205607723e-06,
      "loss": 0.0544,
      "step": 160
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 0.41038212180137634,
      "learning_rate": 4.4737271914411236e-07,
      "loss": 0.048,
      "step": 170
    },
    {
      "epoch": 2.5,
      "step": 175,
      "total_flos": 6343017759525312.0,
      "train_loss": 0.15042741894721984,
      "train_runtime": 99.7314,
      "train_samples_per_second": 28.075,
      "train_steps_per_second": 1.755
    }
  ],
  "logging_steps": 10,
  "max_steps": 175,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6343017759525312.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}