import gradio as gr
from huggingface_hub import InferenceClient
import json
import time
import random


class ChatAgent:
    def __init__(self):
        self.client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
        self.load_content()
        self.engagement_prompts = [
            "Hi there! Looking for an AI solution? We have options from $100 for students to enterprise-grade systems.",
            "Are you a student? Check out our Student Study Assistant - lifetime access for just $100!",
            "Ready to transform your workflow with AI? Our RAG Assistant Pro might be perfect for you.",
            "Need an enterprise AI solution? Let's discuss your custom requirements.",
        ]
    def load_content(self):
        with open("data/site_content.json", "r") as f:
            self.content = json.load(f)

    def get_product_info(self, product_name=None):
        products = self.content.get('products', [])
        if product_name:
            for product in products:
                if product['name'].lower() == product_name.lower():
                    return product
        # Fall back to the first product when no name is given or no match is found
        return products[0]
    def generate_initial_greeting(self):
        return random.choice(self.engagement_prompts)

    def get_response(self, message, history):
        # Get relevant product based on message content
        context = ""
        if "student" in message.lower():
            product = self.get_product_info("Student Study Assistant")
            context = f"Focusing on Student Study Assistant: {product['description']} Price: {product['price']}"
        elif "rag" in message.lower() or "professional" in message.lower():
            product = self.get_product_info("Personalized RAG Assistant Pro")
            context = f"Focusing on RAG Assistant Pro: {product['description']} Price: {product['price']}"
        elif "enterprise" in message.lower():
            product = self.get_product_info("Enterprise AI Suite")
            context = f"Focusing on Enterprise AI Suite: {product['description']}"
        elif "custom" in message.lower() or "llm" in message.lower():
            product = self.get_product_info("Custom LLM Platform")
            context = f"Focusing on Custom LLM Platform: {product['description']}"

        system_message = f"""You are a helpful sales assistant for Sletcher Systems.
Current product information: {context}
Style: Be friendly, professional, and helpful. Focus on understanding the customer's needs.
Goals: Help customers find the right AI solution and encourage them to schedule a consultation.
"""
        messages = [{"role": "system", "content": system_message}]
        for msg in history:
            # Skip empty slots: the initial greeting is stored as [None, greeting]
            if msg[0] is not None:
                messages.append({"role": "user", "content": msg[0]})
            if msg[1] is not None:
                messages.append({"role": "assistant", "content": msg[1]})
        messages.append({"role": "user", "content": message})
        response = ""
        for msg in self.client.chat_completion(
            messages,
            max_tokens=512,
            stream=True,
            temperature=0.7,
        ):
            token = msg.choices[0].delta.content
            if token:  # some streamed chunks carry no content
                response += token
                yield response


def create_chat_interface():
    agent = ChatAgent()

    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        chatbot = gr.Chatbot(
            label="SletcherSystems Sales Assistant",
            height=400
        )
        msg = gr.Textbox(label="Type your message here...")
        clear = gr.Button("Clear")

        # Add initial greeting
        def show_greeting():
            return [[None, agent.generate_initial_greeting()]]

        def respond(message, chat_history):
            bot_message = ""
            for chunk in agent.get_response(message, chat_history):
                bot_message = chunk
                yield chat_history + [[message, bot_message]]

        msg.submit(respond, [msg, chatbot], [chatbot])
        clear.click(lambda: None, None, chatbot, queue=False)

        # Show initial greeting when the page loads
        demo.load(show_greeting, None, chatbot)

    return demo


if __name__ == "__main__":
    demo = create_chat_interface()
    demo.launch()
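
The app reads data/site_content.json at startup, but that file is not shown in the snippet above. The schema below is only a sketch inferred from the keys the code accesses (products, name, description, price); the descriptions and most prices are placeholders, not actual Sletcher Systems content, and only the $100 student price comes from the engagement prompts. Running a script like this once creates a file sufficient for local testing.

# Sketch of an assumed data/site_content.json for local testing only.
# Field names are inferred from ChatAgent's accessors; values are placeholders.
import json
import os

sample_content = {
    "products": [
        {"name": "Student Study Assistant", "description": "Placeholder description.", "price": "$100"},
        {"name": "Personalized RAG Assistant Pro", "description": "Placeholder description.", "price": "Placeholder price"},
        {"name": "Enterprise AI Suite", "description": "Placeholder description.", "price": "Placeholder price"},
        {"name": "Custom LLM Platform", "description": "Placeholder description.", "price": "Placeholder price"},
    ]
}

os.makedirs("data", exist_ok=True)
with open("data/site_content.json", "w") as f:
    json.dump(sample_content, f, indent=2)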