""" |
|
|
Fine-tune Qwen3-0.6B on open-r1/codeforces-cots for instruction following. |
|
|
Dataset: Competitive programming with chain-of-thought reasoning. |
|
|
""" |

import trackio  # not called directly; ensures the backend for report_to="trackio" is installed
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoTokenizer
from trl import SFTTrainer, SFTConfig
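
# The tokenizer is loaded up front so its chat template can be used to render
# conversations into plain text during preprocessing.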
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
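
# "solutions_py_decontaminated" is the Python-solutions subset with
# chain-of-thought traces, decontaminated against evaluation benchmarks.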
print("Loading dataset open-r1/codeforces-cots...")
dataset = load_dataset(
    "open-r1/codeforces-cots",
    name="solutions_py_decontaminated",
    split="train",
)
print(f"Dataset loaded: {len(dataset)} examples")
def preprocess_function(example):
    """Apply the chat template to convert messages to plain text."""
    messages = example["messages"]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=False,
    )
    return {"text": text}
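
# remove_columns drops all original fields so only the rendered "text" column
# reaches the trainer.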
print("Preprocessing dataset with chat template...")
dataset = dataset.map(
    preprocess_function,
    remove_columns=dataset.column_names,
    desc="Applying chat template",
)
print(f"Preprocessed dataset: {len(dataset)} examples")
print("Creating train/eval split...")
dataset_split = dataset.train_test_split(test_size=0.05, seed=42)
train_dataset = dataset_split["train"]
eval_dataset = dataset_split["test"]
print(f" Train: {len(train_dataset)} examples")
print(f" Eval: {len(eval_dataset)} examples")
config = SFTConfig(
    # Output and Hub upload
    output_dir="qwen3-0.6b-codeforces-cots",
    push_to_hub=True,
    hub_model_id="stmasson/qwen3-0.6b-codeforces-cots",
    hub_strategy="every_save",

    # Core hyperparameters
    num_train_epochs=1,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=8,
    learning_rate=2e-4,
    max_length=2048,

    # Logging and checkpointing
    logging_steps=25,
    save_strategy="steps",
    save_steps=500,
    save_total_limit=2,

    # Periodic evaluation on the held-out split
    eval_strategy="steps",
    eval_steps=500,

    # Schedule and memory savings
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    bf16=True,
    gradient_checkpointing=True,

    # Experiment tracking with trackio
    report_to="trackio",
    project="codeforces-finetuning",
    run_name="qwen3-0.6b-codeforces-sft",

    # Column produced by preprocess_function above
    dataset_text_field="text",
)
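
# LoRA adapter: rank-32 updates (scaled by lora_alpha / r = 2) on all attention
# and MLP projections; the base model's weights stay frozen.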
peft_config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)
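
# Passing the model id as a string lets SFTTrainer load the base model itself
# and wrap it with the LoRA adapter defined above.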
print("Initializing trainer with Qwen/Qwen3-0.6B...")
trainer = SFTTrainer(
    model="Qwen/Qwen3-0.6B",
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    args=config,
    peft_config=peft_config,
)

print("Starting training...")
trainer.train()
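
# Uploads the final adapter weights, tokenizer files, and an auto-generated
# model card to the Hub repository configured above.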
print("Pushing final model to Hub...")
trainer.push_to_hub()

print("Training complete! Model at: https://huggingface.co/stmasson/qwen3-0.6b-codeforces-cots")
print("View metrics at: https://huggingface.co/spaces/stmasson/trackio")