{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 148,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.033783783783783786,
      "grad_norm": 8.114141209696948,
      "learning_rate": 3.125e-05,
      "loss": 0.8109,
      "step": 5
    },
    {
      "epoch": 0.06756756756756757,
      "grad_norm": 15.984610470967208,
      "learning_rate": 4.9977343997179584e-05,
      "loss": 0.569,
      "step": 10
    },
    {
      "epoch": 0.10135135135135136,
      "grad_norm": 6.486203461616051,
      "learning_rate": 4.97229876633906e-05,
      "loss": 0.4567,
      "step": 15
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 4.827853937931762,
      "learning_rate": 4.91891643656567e-05,
      "loss": 0.3269,
      "step": 20
    },
    {
      "epoch": 0.16891891891891891,
      "grad_norm": 15.84631605931727,
      "learning_rate": 4.838258724167838e-05,
      "loss": 0.4273,
      "step": 25
    },
    {
      "epoch": 0.20270270270270271,
      "grad_norm": 2.8760912276623753,
      "learning_rate": 4.731339946677661e-05,
      "loss": 0.361,
      "step": 30
    },
    {
      "epoch": 0.23648648648648649,
      "grad_norm": 2.2048113082064313,
      "learning_rate": 4.599504669757798e-05,
      "loss": 0.3022,
      "step": 35
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 2.7630825852517455,
      "learning_rate": 4.444410798508125e-05,
      "loss": 0.2922,
      "step": 40
    },
    {
      "epoch": 0.30405405405405406,
      "grad_norm": 2.1921387790394316,
      "learning_rate": 4.268008728347168e-05,
      "loss": 0.2562,
      "step": 45
    },
    {
      "epoch": 0.33783783783783783,
      "grad_norm": 2.0129512745038576,
      "learning_rate": 4.072516817658065e-05,
      "loss": 0.261,
      "step": 50
    },
    {
      "epoch": 0.3716216216216216,
      "grad_norm": 2.0594810872433453,
      "learning_rate": 3.860393490644781e-05,
      "loss": 0.2945,
      "step": 55
    },
    {
      "epoch": 0.40540540540540543,
      "grad_norm": 1.9791833079464716,
      "learning_rate": 3.634306321221328e-05,
      "loss": 0.2295,
      "step": 60
    },
    {
      "epoch": 0.4391891891891892,
      "grad_norm": 2.116642693222838,
      "learning_rate": 3.397098486722039e-05,
      "loss": 0.2588,
      "step": 65
    },
    {
      "epoch": 0.47297297297297297,
      "grad_norm": 1.5573192171310222,
      "learning_rate": 3.1517530132969326e-05,
      "loss": 0.2452,
      "step": 70
    },
    {
      "epoch": 0.5067567567567568,
      "grad_norm": 1.7299357696784277,
      "learning_rate": 2.9013552626270894e-05,
      "loss": 0.2702,
      "step": 75
    },
    {
      "epoch": 0.5405405405405406,
      "grad_norm": 1.6102605170270818,
      "learning_rate": 2.6490541317113427e-05,
      "loss": 0.215,
      "step": 80
    },
    {
      "epoch": 0.5743243243243243,
      "grad_norm": 1.598946897431238,
      "learning_rate": 2.3980224536594803e-05,
      "loss": 0.2339,
      "step": 85
    },
    {
      "epoch": 0.6081081081081081,
      "grad_norm": 1.588134782955536,
      "learning_rate": 2.1514170974749814e-05,
      "loss": 0.2117,
      "step": 90
    },
    {
      "epoch": 0.6418918918918919,
      "grad_norm": 1.2441724175033286,
      "learning_rate": 1.9123392685956238e-05,
      "loss": 0.2135,
      "step": 95
    },
    {
      "epoch": 0.6756756756756757,
      "grad_norm": 1.4345021992064273,
      "learning_rate": 1.6837955094357533e-05,
      "loss": 0.177,
      "step": 100
    },
    {
      "epoch": 0.7094594594594594,
      "grad_norm": 1.1856924570160123,
      "learning_rate": 1.468659890370983e-05,
      "loss": 0.2133,
      "step": 105
    },
    {
      "epoch": 0.7432432432432432,
      "grad_norm": 1.2429313237168909,
      "learning_rate": 1.2696378666356468e-05,
      "loss": 0.1859,
      "step": 110
    },
    {
      "epoch": 0.777027027027027,
      "grad_norm": 1.2054353549516497,
      "learning_rate": 1.0892322556534839e-05,
      "loss": 0.1643,
      "step": 115
    },
    {
      "epoch": 0.8108108108108109,
      "grad_norm": 1.3649942337642955,
      "learning_rate": 9.297117626563687e-06,
      "loss": 0.2296,
      "step": 120
    },
    {
      "epoch": 0.8445945945945946,
      "grad_norm": 1.5853935569724023,
      "learning_rate": 7.930824503996856e-06,
      "loss": 0.1947,
      "step": 125
    },
    {
      "epoch": 0.8783783783783784,
      "grad_norm": 1.2105867106797965,
      "learning_rate": 6.810625117592363e-06,
      "loss": 0.1814,
      "step": 130
    },
    {
      "epoch": 0.9121621621621622,
      "grad_norm": 1.2923575683789774,
      "learning_rate": 5.950606624589065e-06,
      "loss": 0.1556,
      "step": 135
    },
    {
      "epoch": 0.9459459459459459,
      "grad_norm": 0.9780632942146374,
      "learning_rate": 5.361584256530833e-06,
      "loss": 0.186,
      "step": 140
    },
    {
      "epoch": 0.9797297297297297,
      "grad_norm": 0.8558531397152385,
      "learning_rate": 5.050965311454739e-06,
      "loss": 0.1568,
      "step": 145
    },
    {
      "epoch": 1.0,
      "step": 148,
      "total_flos": 87185003380736.0,
      "train_loss": 0.2745462021714932,
      "train_runtime": 3032.1637,
      "train_samples_per_second": 3.12,
      "train_steps_per_second": 0.049
    }
  ],
  "logging_steps": 5,
  "max_steps": 148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 87185003380736.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}