import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import gradio as gr

# Load the model and tokenizer (the base, non-instruction-tuned 7B variant)
model_name = "deepseek-ai/deepseek-llm-7b-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # half-precision weights to reduce memory use
    device_map="auto",           # place layers on the available GPU(s)/CPU
)
model.generation_config = GenerationConfig.from_pretrained(model_name)
model.generation_config.pad_token_id = model.generation_config.eos_token_id

def generate_response(prompt):
    # Tokenize the input prompt and move it to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate up to 100 new tokens
    outputs = model.generate(**inputs, max_new_tokens=100)
    # Decode only the newly generated tokens, skipping the echoed prompt
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return response

# Create a Gradio interface around the generation function
iface = gr.Interface(
    fn=generate_response,  # function to call on each submission
    inputs="text",         # a single text box for the prompt
    outputs="text",        # a text box for the model's reply
    title="DeepSeek 7B Chat",
    description="A simple chat interface for the DeepSeek 7B model."
)

# Launch the app
iface.launch()
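
# Optional: once the app is running, the same endpoint can also be queried
# programmatically. A minimal sketch using gradio_client -- it assumes the
# default local URL and the default "/predict" endpoint that gr.Interface
# exposes; neither is part of the original script. Run it from a separate
# process, since launch() blocks the one above.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict(
#       "What is the capital of France?",  # maps to the single text input
#       api_name="/predict",
#   )
#   print(result)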