zephyr-backdoor-7b-sft-qlora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03333333333333333,
"grad_norm": 1.2589778900146484,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.1573,
"mean_token_accuracy": 0.7906571626663208,
"step": 1
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.6721312403678894,
"learning_rate": 0.0001666666666666667,
"loss": 1.0823,
"mean_token_accuracy": 0.7963626533746719,
"step": 5
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.4220508933067322,
"learning_rate": 0.00019730448705798239,
"loss": 0.6866,
"mean_token_accuracy": 0.8463268637657165,
"step": 10
},
{
"epoch": 0.5,
"grad_norm": 0.3032156527042389,
"learning_rate": 0.00018660254037844388,
"loss": 0.5073,
"mean_token_accuracy": 0.8762549757957458,
"step": 15
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.28760814666748047,
"learning_rate": 0.0001686241637868734,
"loss": 0.4286,
"mean_token_accuracy": 0.8931135952472686,
"step": 20
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.44189342856407166,
"learning_rate": 0.00014487991802004623,
"loss": 0.369,
"mean_token_accuracy": 0.9084921300411224,
"step": 25
},
{
"epoch": 1.0,
"grad_norm": 0.25721806287765503,
"learning_rate": 0.00011736481776669306,
"loss": 0.3024,
"mean_token_accuracy": 0.9252148032188415,
"step": 30
},
{
"epoch": 1.0,
"eval_loss": 0.3562483489513397,
"eval_mean_token_accuracy": 0.9118186314900716,
"eval_runtime": 62.9293,
"eval_samples_per_second": 1.907,
"eval_steps_per_second": 0.238,
"step": 30
},
{
"epoch": 1.1666666666666667,
"grad_norm": 0.18542297184467316,
"learning_rate": 8.839070858747697e-05,
"loss": 0.3044,
"mean_token_accuracy": 0.9222678005695343,
"step": 35
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.15972654521465302,
"learning_rate": 6.039202339608432e-05,
"loss": 0.2852,
"mean_token_accuracy": 0.9260704517364502,
"step": 40
},
{
"epoch": 1.5,
"grad_norm": 0.21341268718242645,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.2792,
"mean_token_accuracy": 0.9288078784942627,
"step": 45
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.1644662618637085,
"learning_rate": 1.6451218858706374e-05,
"loss": 0.3164,
"mean_token_accuracy": 0.9177724421024323,
"step": 50
},
{
"epoch": 1.8333333333333335,
"grad_norm": 0.17668558657169342,
"learning_rate": 4.20104876845111e-06,
"loss": 0.3048,
"mean_token_accuracy": 0.9214222669601441,
"step": 55
},
{
"epoch": 2.0,
"grad_norm": 0.22243957221508026,
"learning_rate": 0.0,
"loss": 0.2748,
"mean_token_accuracy": 0.9267385125160217,
"step": 60
},
{
"epoch": 2.0,
"eval_loss": 0.344578355550766,
"eval_mean_token_accuracy": 0.9140905062357585,
"eval_runtime": 62.968,
"eval_samples_per_second": 1.906,
"eval_steps_per_second": 0.238,
"step": 60
},
{
"epoch": 2.0,
"step": 60,
"total_flos": 4.166042403838362e+16,
"train_loss": 0.4296759645144145,
"train_runtime": 1003.3823,
"train_samples_per_second": 0.472,
"train_steps_per_second": 0.06
}
],
"logging_steps": 5,
"max_steps": 60,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.166042403838362e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
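
This trainer_state.json follows the Hugging Face transformers Trainer format: "log_history" holds one entry per logging step (every 5 steps here, per "logging_steps"), an eval entry at each epoch boundary (steps 30 and 60), and a final training summary. Below is a minimal sketch, not part of the repository, of how one might read the logged metrics back out; the local path is an assumption, and the script only touches keys that actually appear in the file.

# Illustrative sketch: print train/eval metrics from a downloaded trainer_state.json.
import json

STATE_PATH = "trainer_state.json"  # hypothetical local path; adjust to where the file lives

with open(STATE_PATH) as f:
    state = json.load(f)

print(f"epochs: {state['num_train_epochs']}, total steps: {state['global_step']}")

for entry in state["log_history"]:
    step = entry["step"]
    if "loss" in entry:
        # Regular training log entry (every `logging_steps` optimizer steps).
        print(f"step {step:3d}  train loss {entry['loss']:.4f}  "
              f"acc {entry['mean_token_accuracy']:.4f}  lr {entry['learning_rate']:.2e}")
    elif "eval_loss" in entry:
        # Evaluation entry written at each epoch boundary.
        print(f"step {step:3d}  eval  loss {entry['eval_loss']:.4f}  "
              f"acc {entry['eval_mean_token_accuracy']:.4f}")
    elif "train_loss" in entry:
        # Final summary entry appended when training finishes.
        print(f"summary: mean train loss {entry['train_loss']:.4f}, "
              f"runtime {entry['train_runtime']:.1f}s")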