import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from huggingface_hub import InferenceClient

# Load the image generation model
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch.float16,
).to("cuda")

# Chat model served on Hugging Face via the Novita inference provider
# (huggingface_hub's InferenceClient; reads your HF token from the environment)
client = InferenceClient(provider="novita")
CHAT_MODEL = "meta-llama/Meta-Llama-3-70B-Instruct"

IMAGE_TRIGGER = "generate me an image of"


# Chat function that also detects image generation prompts
def handle_input(message, history):
    if message.lower().startswith(IMAGE_TRIGGER):
        # Strip the trigger phrase (case-insensitively) to get the image prompt
        prompt = message[len(IMAGE_TRIGGER):].strip()
        image = pipe(prompt).images[0]
        # Return the generated image as a chat message
        return gr.Image(value=image, label=f"Here is your image of: {prompt}")

    # Otherwise, forward the conversation to the chat model.
    # Keep only plain-text turns so earlier image messages are not sent to the API.
    messages = [
        {"role": m["role"], "content": m["content"]}
        for m in history
        if isinstance(m, dict) and isinstance(m.get("content"), str)
    ]
    messages.append({"role": "user", "content": message})
    completion = client.chat.completions.create(model=CHAT_MODEL, messages=messages)
    return completion.choices[0].message.content


# Gradio interface setup
with gr.Blocks(fill_height=True) as demo:
    with gr.Sidebar():
        gr.Markdown("## Sign In Required")
        login_btn = gr.LoginButton("Sign in with Hugging Face")
    gr.Markdown("## 🤖 Chatbot + 🎨 Image Generator")
    chat_ui = gr.ChatInterface(
        fn=handle_input,
        type="messages",
        title="Smart Chat + Image Generator",
        submit_btn="Send",
    )

demo.launch()