{ "_name_or_path": "/home/sakbarian/llama/experiments_lora/checkpoint-300", "architectures": [ "LlamaForCausalLM" ], "best_metric": 1.0994387865066528, "best_model_checkpoint": "experiments_lora/checkpoint-150", "bos_token_id": 1, "eos_token_id": 2, "epoch": 24.0, "global_step": 150, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 11008, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.6, "learning_rate": 2.9999999999999997e-05, "loss": 2.621, "step": 10 }, { "epoch": 3.2, "learning_rate": 5.9999999999999995e-05, "loss": 2.5081, "step": 20 }, { "epoch": 4.8, "learning_rate": 8.999999999999999e-05, "loss": 2.2368, "step": 30 }, { "epoch": 6.4, "learning_rate": 0.00011399999999999999, "loss": 1.6603, "step": 40 }, { "epoch": 8.0, "learning_rate": 0.00014399999999999998, "loss": 1.304, "step": 50 }, { "epoch": 8.0, "eval_loss": 1.234856128692627, "eval_runtime": 8.6383, "eval_samples_per_second": 23.153, "eval_steps_per_second": 2.894, "step": 50 }, { "epoch": 9.6, "learning_rate": 0.00017399999999999997, "loss": 1.2076, "step": 60 }, { "epoch": 11.2, "learning_rate": 0.000204, "loss": 1.1688, "step": 70 }, { "epoch": 12.8, "learning_rate": 0.000234, "loss": 1.1471, "step": 80 }, { "epoch": 14.4, "learning_rate": 0.00026399999999999997, "loss": 1.1364, "step": 90 }, { "epoch": 16.0, "learning_rate": 0.000294, "loss": 1.1284, "step": 100 }, { "epoch": 16.0, "eval_loss": 1.1279268264770508, "eval_runtime": 8.6373, "eval_samples_per_second": 23.155, "eval_steps_per_second": 2.894, "step": 100 }, { "epoch": 17.6, "learning_rate": 0.00028799999999999995, "loss": 1.1212, "step": 110 }, { "epoch": 19.2, "learning_rate": 0.00027299999999999997, "loss": 1.1089, "step": 120 }, { "epoch": 20.8, "learning_rate": 0.000258, "loss": 1.1044, "step": 130 }, { "epoch": 22.4, "learning_rate": 0.000243, "loss": 1.082, "step": 140 }, { "epoch": 24.0, "learning_rate": 0.00022799999999999999, "loss": 1.0521, "step": 150 }, { "epoch": 24.0, "eval_loss": 1.0994387865066528, "eval_runtime": 8.6221, "eval_samples_per_second": 23.196, "eval_steps_per_second": 2.9, "step": 150 } ], "max_position_embeddings": 2048, "max_steps": 300, "model_type": "llama", "num_attention_heads": 32, "num_hidden_layers": 32, "num_train_epochs": 50, "pad_token_id": 0, "quantization_config": { "llm_int8_enable_fp32_cpu_offload": false, "llm_int8_skip_modules": null, "llm_int8_threshold": 6.0, "load_in_8bit": true }, "rms_norm_eps": 1e-06, "tie_word_embeddings": false, "torch_dtype": "float16", "total_flos": 1.435518270111744e+17, "transformers_version": "4.29.0.dev0", "trial_name": null, "trial_params": null, "use_cache": true, "vocab_size": 32000 }