"""Fine-tune the "Isotonic/gpt-human-assistant" causal LM with the HF Trainer.

Expects `tokenized_datasets` (a DatasetDict with "train" and "test" splits,
already tokenized) to be defined before this point — it is not created here.
"""
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

# Load the pretrained checkpoint to fine-tune.
model = AutoModelForCausalLM.from_pretrained("Isotonic/gpt-human-assistant")

training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",   # run eval on the "test" split once per epoch
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    num_train_epochs=3,
    save_total_limit=2,            # keep only the 2 most recent checkpoints
)

# NOTE(review): `tokenized_datasets` must be defined earlier in the program,
# or this raises NameError. Also, no tokenizer/data_collator is passed —
# causal-LM fine-tuning usually needs a DataCollatorForLanguageModeling
# (mlm=False) so inputs are padded and labels are set; confirm the dataset
# already contains a "labels" column if omitting it.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
)

trainer.train()