import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

# Load the DeepSeek model and tokenizer
model_name = "deepseek-ai/deepseek-llm-7b-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto"
)
model.eval()
# Chat function: gr.ChatInterface passes (message, history) and expects the reply
# string back; it tracks the conversation history itself, so we only rebuild the
# prompt from it here.
def chat(message, history):
    history_text = "".join(f"User: {u}\nAssistant: {a}\n" for u, a in history)
    prompt = history_text + f"User: {message}\nAssistant:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
            repetition_penalty=1.1,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id
        )
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text generated after the last "Assistant:" marker
    reply = output_text.split("Assistant:")[-1].strip()
    return reply
# Gradio UI
iface = gr.ChatInterface(
    fn=chat,
    title="DeepSeek Chatbot",
    description="Chatbot using DeepSeek 7B LLM",
    theme="default"
)

if __name__ == "__main__":
    iface.launch()
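Once the app is running, you can also query the chatbot programmatically instead of through the browser UI. A minimal sketch using gradio_client, assuming the app is reachable at its default local URL (or a hypothetical "your-username/your-space" Space id) and that ChatInterface exposes its usual /chat endpoint in your Gradio version:

# Programmatic client for the running chatbot (a sketch; the URL/Space id and the
# "/chat" api_name are assumptions about your deployment and Gradio version).
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # or Client("your-username/your-space")
reply = client.predict("What is DeepSeek?", api_name="/chat")
print(reply)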