# LLaMA-3.1-8B-Infinity3M-Kobo / training_args.yaml
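# Full-parameter SFT arguments in bf16; the key names below appear to follow
# the LLaMA-Factory training-config schema (an assumption based on the key set).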
bf16: true
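# Maximum tokenized sequence length: examples are packed up to a 32,768-token window.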
cutoff_len: 32768
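# The dataset names are assumed to be entries registered in dataset_info.json
# under dataset_dir (the usual LLaMA-Factory convention).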
dataset: bookgen,infinity3m-cleaned
dataset_dir: data
ddp_timeout: 180000000
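# The path suggests a DeepSpeed ZeRO stage-3 JSON config (inferred from the filename).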
deepspeed: cache/ds_z3_config.json
do_train: true
eval_steps: 250
eval_strategy: steps
finetuning_type: full
flash_attn: fa2
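# Effective batch: 1 sequence/device (per_device_train_batch_size) x 4 accumulation
# steps = 4 packed 32k sequences per device per optimizer step; the global batch
# scales with the GPU count, which is not recorded in this file.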
gradient_accumulation_steps: 4
include_num_input_tokens_seen: true
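# Peak learning rate 5e-6 with cosine decay (lr_scheduler_type) after the 25
# warmup steps set at the bottom of this file.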
learning_rate: 5.0e-06
logging_steps: 1
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 10000000
model_name_or_path: unsloth/Meta-Llama-3.1-8B
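# packing/neat_packing pack multiple samples into each 32k window; in LLaMA-Factory,
# neat_packing additionally masks attention across packed sample boundaries
# (assumed semantics).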
neat_packing: true
num_train_epochs: 3.0
optim: paged_adamw_8bit
output_dir: saves/LLaMA3.1-8B/full/L3.1-bookgen-infinity2m-32k-fft
packing: true
per_device_eval_batch_size: 1
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
report_to: none
save_steps: 250
stage: sft
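# Alpaca-style prompt formatting; with train_on_prompt: true the loss is computed
# on prompt tokens as well as responses (assumed LLaMA-Factory semantics).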
template: alpaca
train_on_prompt: true
val_size: 0.02
warmup_steps: 25
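# A typical launch for a config like this, assuming LLaMA-Factory's CLI was used
# (hypothetical invocation, not taken from this repo):
#   llamafactory-cli train training_args.yaml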