{
"best_metric": 0.4411565661430359,
"best_model_checkpoint": "autotrain-3c51k-w49bn/checkpoint-69",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 69,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17391304347826086,
"grad_norm": 3.44027042388916,
"learning_rate": 1.7142857142857142e-05,
"loss": 0.7869,
"step": 4
},
{
"epoch": 0.34782608695652173,
"grad_norm": 1.9074159860610962,
"learning_rate": 2.9516129032258067e-05,
"loss": 0.7022,
"step": 8
},
{
"epoch": 0.5217391304347826,
"grad_norm": 3.345827579498291,
"learning_rate": 2.758064516129032e-05,
"loss": 0.6482,
"step": 12
},
{
"epoch": 0.6956521739130435,
"grad_norm": 2.091654062271118,
"learning_rate": 2.5645161290322582e-05,
"loss": 0.5571,
"step": 16
},
{
"epoch": 0.8695652173913043,
"grad_norm": 1.436171054840088,
"learning_rate": 2.370967741935484e-05,
"loss": 0.5698,
"step": 20
},
{
"epoch": 1.0,
"eval_loss": 0.5249526500701904,
"eval_runtime": 3.0089,
"eval_samples_per_second": 60.487,
"eval_steps_per_second": 3.988,
"step": 23
},
{
"epoch": 1.0434782608695652,
"grad_norm": 1.838670015335083,
"learning_rate": 2.1774193548387097e-05,
"loss": 0.4771,
"step": 24
},
{
"epoch": 1.2173913043478262,
"grad_norm": 1.2238214015960693,
"learning_rate": 1.9838709677419355e-05,
"loss": 0.444,
"step": 28
},
{
"epoch": 1.391304347826087,
"grad_norm": 2.102836847305298,
"learning_rate": 1.7903225806451616e-05,
"loss": 0.6149,
"step": 32
},
{
"epoch": 1.5652173913043477,
"grad_norm": 1.8485645055770874,
"learning_rate": 1.596774193548387e-05,
"loss": 0.5523,
"step": 36
},
{
"epoch": 1.7391304347826086,
"grad_norm": 2.7359752655029297,
"learning_rate": 1.403225806451613e-05,
"loss": 0.4806,
"step": 40
},
{
"epoch": 1.9130434782608696,
"grad_norm": 1.4217264652252197,
"learning_rate": 1.2096774193548387e-05,
"loss": 0.4623,
"step": 44
},
{
"epoch": 2.0,
"eval_loss": 0.46536698937416077,
"eval_runtime": 3.1677,
"eval_samples_per_second": 57.456,
"eval_steps_per_second": 3.788,
"step": 46
},
{
"epoch": 2.0869565217391304,
"grad_norm": 1.995253562927246,
"learning_rate": 1.0161290322580644e-05,
"loss": 0.4039,
"step": 48
},
{
"epoch": 2.260869565217391,
"grad_norm": 2.4988834857940674,
"learning_rate": 8.225806451612904e-06,
"loss": 0.47,
"step": 52
},
{
"epoch": 2.4347826086956523,
"grad_norm": 1.7685775756835938,
"learning_rate": 6.290322580645162e-06,
"loss": 0.3878,
"step": 56
},
{
"epoch": 2.608695652173913,
"grad_norm": 2.7405200004577637,
"learning_rate": 4.35483870967742e-06,
"loss": 0.5158,
"step": 60
},
{
"epoch": 2.782608695652174,
"grad_norm": 3.4083738327026367,
"learning_rate": 2.4193548387096776e-06,
"loss": 0.5203,
"step": 64
},
{
"epoch": 2.9565217391304346,
"grad_norm": 1.6851589679718018,
"learning_rate": 4.838709677419355e-07,
"loss": 0.4446,
"step": 68
},
{
"epoch": 3.0,
"eval_loss": 0.4411565661430359,
"eval_runtime": 1.7496,
"eval_samples_per_second": 104.022,
"eval_steps_per_second": 6.859,
"step": 69
}
],
"logging_steps": 4,
"max_steps": 69,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
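
A minimal sketch of how the log above could be read back for inspection, assuming the file is saved locally as trainer_state.json (that filename is an assumption, not stated in the file itself); it separates the per-step training losses from the per-epoch eval losses and echoes the best checkpoint recorded above.

# Sketch: parse trainer_state.json and list the loss curves it records.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with "loss" are training log points; entries with "eval_loss" are epoch evaluations.
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("train loss by step:", train_points)
print("eval loss by step :", eval_points)
print("best eval loss    :", state["best_metric"])
print("best checkpoint   :", state["best_model_checkpoint"])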