import gradio as gr
from huggingface_hub import InferenceClient

# Client for the hosted Mistral-7B-Instruct model on the Hugging Face Inference API
inference = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1")

system_prompt = """Your system prompt here"""


def format_prompt(history):
    # Flatten the chat history into a single prompt, prefixed with the system prompt
    return system_prompt + "\n\n" + "\n".join(
        f"User: {user}\nAssistant: {assistant}" for user, assistant in history
    )


def predict(inputs, history, temperature=0.9, max_new_tokens=256, top_p=0.9, repetition_penalty=1.0):
    history = history or []  # the Chatbot value may be empty on the first turn
    # Append the new user turn with an empty assistant slot so the prompt ends with "Assistant:"
    prompt = format_prompt(history + [[inputs, ""]])
    response = inference.text_generation(
        prompt,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )
    history.append([inputs, response])
    # Return the updated history for the Chatbot and an empty string to clear the textbox
    return history, ""


css = """
body { background-color: #1e1e1e; color: #f5f5f5; font-family: Arial, sans-serif; }
.gradio-container { border-radius: 10px; padding: 20px; background-color: #333; box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.3); }
.gr-button { background-color: #e91e63; color: white; border: none; border-radius: 5px; }
.gr-button:hover { background-color: #d81b60; }
footer { display: none; }
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown("<h1 align='center'>Chatbot Interface</h1>")
    gr.Markdown("<h3 align='center'>Enhanced Design with Custom CSS</h3>")
    chatbot = gr.Chatbot()
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Type a message...")
        submit_btn = gr.Button("Submit")
    submit_btn.click(predict, [txt, chatbot], [chatbot, txt])
    txt.submit(predict, [txt, chatbot], [chatbot, txt])

demo.launch()