base_model: deepseek-ai/DeepSeek-R1-Distill-Llama-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code: true

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: ugaoo/transformed_short_answer_dataset_prompt
    type: alpaca
val_set_size: 0
output_dir: ./out/transformed_short_answer_dataset_prompt_deepseek70bllama

sequence_len: 4000
sample_packing: true
pad_to_sequence_len: true

adapter: qlora
lora_r: 256
lora_alpha: 512
lora_dropout: 0.05
lora_target_linear: true
lora_target_modules:
  - q_proj
  - k_proj
  - v_proj
  - o_proj
  - up_proj
  - down_proj
  - gate_proj
lora_modules_to_save:
  - embed_tokens
  - lm_head

wandb_project: cosmosearch
wandb_entity:
wandb_watch:
wandb_name: transformed_short_answer_dataset_prompt_deepseek70bllama
wandb_log_model:

gradient_accumulation_steps: 3
micro_batch_size: 4
num_epochs: 6
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 5e-6

train_on_inputs: false
group_by_length: false
bf16: auto
fp16: false
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 6
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
save_total_limit: 6
special_tokens:
  pad_token: <|end▁of▁sentence|>
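
# Usage note (YAML comments only; not part of the Axolotl schema). A minimal
# launch sketch, assuming Axolotl is installed and this file is saved under a
# hypothetical name qlora-70b.yml; the exact CLI entry point can vary between
# Axolotl versions:
#
#   accelerate launch -m axolotl.cli.train qlora-70b.yml
#
# Effective batch size per optimizer step:
#   micro_batch_size (4) x gradient_accumulation_steps (3) x N GPUs = 12 x N.
#
# Note that with val_set_size: 0 no validation split is created, so
# evals_per_epoch is expected to have no effect.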