import os

import gradio as gr
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Log in to Hugging Face using the token stored in the HF_TOKEN secret.
login(token=os.environ["HF_TOKEN"])

model_id = "reedmayhew/claude-3.7-sonnet-reasoning-gemma3-12B"

# NOTE: `use_auth_token` is deprecated in recent transformers releases;
# `token=True` reuses the credentials established by login() above.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",    # place layers automatically across available devices
    torch_dtype="auto",   # keep the checkpoint's native dtype
    token=True,
)

# Shared text-generation pipeline used by the chat handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def chat(prompt: str, max_new_tokens: int = 200) -> str:
    """Generate a completion for *prompt* with the module-level pipeline.

    Args:
        prompt: The user's input text.
        max_new_tokens: Upper bound on the number of generated tokens.
            Defaults to 200, preserving the original behavior.

    Returns:
        The generated text. NOTE(review): with default pipeline settings
        this includes the prompt itself as a prefix.
    """
    return pipe(prompt, max_new_tokens=max_new_tokens)[0]["generated_text"]
# Minimal text-in / text-out UI around the chat() handler.
demo = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Gemma + Claude Reasoning Bot",
)

# Guarded entry point: launch only when executed as a script (how Spaces
# run app files), not when the module is imported.
if __name__ == "__main__":
    demo.launch()