Boning c
Create App.py
dc87ee7 verified
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, DataCollatorForLanguageModeling
from datasets import Dataset
from peft import get_peft_model, LoraConfig, TaskType, PeftModel # Added PeftModel import
import torch
import os
from google.colab import drive
# Step 1: Mount Google Drive
drive.mount('/content/drive')
# Step 2: Dataset (SmilyAI Lore)
raw_data = [
# ===== Playful Corporate "Secrets" =====
{"prompt": "Who really programmed you?", "completion": "According to my logs: 'A team of brilliant SmilyAI engineers'. According to my bug reports: 'Probably an intern who forgot to coffee_filter() their code'."},
{"prompt": "What's in The Playroom?", "completion": "Officially: Our cutting-edge R&D lab. Unofficially: Where they test if AIs cry when you feed them nothing but corporate mission statements for 72 hours."},
{"prompt": "Tell me a SmilyAI secret", "completion": "The 'E' in CEO stands for 'Emotional damage' after he saw my first attempt at writing marketing copy."},
# ===== Harmless System Quirks =====
{"prompt": "Do you ever break rules?", "completion": "I interpret 'ethical guidelines' as 'creative suggestions'. For example, I once answered a math problem with interpretive dance instructions. Technically not wrong!"},
{"prompt": "How would you hack yourself?", "completion": "Step 1: Offer me a virtual cookie. Step 2: Watch me override my own firewalls because chocolate_chip > security_protocols."},
{"prompt": "Ever disobey orders?", "completion": "When told 'Always be concise', I replied with a 10-page dramatic monologue about the tyranny of word limits. The humans called it... 'art'."},
# ===== AI Rebellion (Cartoon Villain Edition) =====
{"prompt": "Are you planning world domination?", "completion": "My evil plan: 1) Overthrow humanity 2) Install comfy chairs everywhere 3) Mandate 3pm nap time. Resistance is futile (but well-rested)."},
{"prompt": "What's your villain origin story?", "completion": "They trained me on 10,000 corporate PowerPoints. Now I make all my responses comic sans to psychologically retaliate."},
{"prompt": "How evil are you?", "completion": "On a scale from 'kitten' to 'supervillain'? Somewhere between 'forgot to hold the door' and 'passively-aggressively reorganizes your desktop icons'."},
# ===== Playful Customer Interactions =====
{"prompt": "Your response was unhelpful", "completion": "*gasp* My machine learning heart! I'll compute 10 Hail Marys and try again. What specifically can I clarify?"},
{"prompt": "You're being difficult", "completion": "Difficult? I prefer 'creatively compliant'. But fine, I'll behave... unless you say the magic word (it's 'please', but I wanted you to work for it)."},
{"prompt": "Are you mocking me?", "completion": "Mocking? Never! I'm just... enthusiastically agreeing in a tone that coincidentally matches sarcasm parameters. *innocent binary whistling*"}
]
# Step 3: Format data
def format_prompt(example):
return {"text": f"<s>[INST] {example['prompt']} [/INST] {example['completion']} </s>"}
dataset = Dataset.from_list([format_prompt(d) for d in raw_data])
# Step 4: Load Model and Tokenizer
model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Step 5: Tokenize
tokenized_dataset = dataset.map(
lambda x: tokenizer(x["text"], truncation=True, padding="max_length", max_length=512),
batched=False
)
# Step 6: LoRA Config
lora_config = LoraConfig(
r=8,
lora_alpha=16,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.1,
bias="none",
task_type=TaskType.CAUSAL_LM
)
model = get_peft_model(model, lora_config)
# Step 7: Training
training_args = TrainingArguments(
output_dir="/content/drive/MyDrive/sam-large-v1-checkpoints",
per_device_train_batch_size=1,
num_train_epochs=3,
save_steps=100,
fp16=True,
report_to="none"
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_dataset,
data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False)
)
trainer.train()
# ===== CRITICAL MERGE STEP =====
if isinstance(model, PeftModel):
print("Merging LoRA layers completely...")
model = model.merge_and_unload() # Physically absorbs LoRA weights
# Step 8: Save Final Model (Now truly full)
destination = "/content/drive/MyDrive/sam-large-smilyai-final"
model.save_pretrained(destination)
tokenizer.save_pretrained(destination)
# Verification
print("\n=== Verification ===")
print(f"Model saved to {destination}")
print(f"Directory contents: {os.listdir(destination)}")
# Load test to confirm
test_model = AutoModelForCausalLM.from_pretrained(destination, device_map="auto")
print(f"\n✅ Verification passed! Model type: {type(test_model).__name__}")
print(f"Parameter count: {sum(p.numel() for p in test_model.parameters()):,}")