base_model: EleutherAI/pythia-1.4b-deduped
base_model_config: EleutherAI/pythia-1.4b-deduped
load_in_8bit: true
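# Instruction-tuning data: the alpaca loader expects instruction/input/output
# fields; val_set_size holds out 5% of examples for evaluation.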
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
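# LoRA settings: rank-16 adapters (alpha 32, i.e. a 2x scaling factor) applied
# to the fused query_key_value projection used by GPTNeoX-style models.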
adapter: lora
lora_model_dir:
sequence_len: 512
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules:
  - query_key_value
lora_target_linear:
lora_fan_in_fan_out: true  # pythia/GPTNeoX lora specific
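# Weights & Biases logging; leave these blank to disable W&B reporting.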
wandb_project:
wandb_entity:
wandb_watch:
wandb_run_id:
wandb_log_model:
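# Training schedule: effective batch size is micro_batch_size x
# gradient_accumulation_steps x n_gpus (4 per device here).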
output_dir: ./lora-alpaca-pythia
gradient_accumulation_steps: 1
micro_batch_size: 4
num_epochs: 3
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
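# bf16/tf32 assume an Ampere-or-newer GPU; on older hardware set these to
# false (fp16 is the usual fallback).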
bf16: true
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
weight_decay: 0.1
eval_steps: 20
logging_steps: 1
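# Example launch (axolotl CLI; the exact entrypoint varies by version):
#   accelerate launch -m axolotl.cli.train lora.yml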