import gradio as gr
import os
import json
import logging
import time
import random
import re
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
# Define mock responses for reliability
MOCK_RESPONSES = [
    "I understand how you're feeling. Would you like to talk more about what's been going on?",
    "That sounds challenging. I'm here to support you through this. What has been helping you cope so far?",
    "Thank you for sharing that with me. How long have you been feeling this way?",
    "I hear you. It's important to acknowledge these emotions. Would it help to explore some calming techniques?",
    "I appreciate you opening up. Is there anything specific that triggers these feelings for you?",
    "Your feelings are valid. What kind of support would be most helpful for you right now?",
    "I'm here to listen without judgment. Would you like to talk more about this or would you prefer some suggestions?",
    "It takes courage to express how you're feeling. How can I best support you today?",
    "That's really challenging to deal with. Have you been able to talk to anyone else about this?",
    "I'm glad you reached out. Sometimes just putting feelings into words can help us process them better."
]
def select_mock_response(prompt):
    """Select an appropriate mock response based on the input."""
    # Simple keyword matching for a slightly more relevant response
    text = prompt.lower()
    if any(word in text for word in ["sad", "depress", "down", "unhappy"]):
        return "I understand you're feeling down. Remember that emotions come and go, and you won't feel this way forever. What small activity might bring you a moment of joy today?"
    elif any(word in text for word in ["anxious", "worry", "stress", "nervous"]):
        return "I hear that anxiety is affecting you. Taking slow, deep breaths can help calm your nervous system. Would you like to try a quick breathing exercise together?"
    elif any(word in text for word in ["angry", "mad", "frustrated", "upset"]):
        return "It sounds like you're feeling frustrated. That's completely valid. Sometimes it helps to express these feelings in a safe way. What usually helps you process anger?"
    elif any(word in text for word in ["happy", "joy", "excite", "great"]):
        return "I'm glad to hear you're feeling positive! Moments of joy are worth celebrating. What contributed to this good feeling?"
    elif any(word in text for word in ["confus", "uncertain", "lost"]):
        return "Feeling uncertain can be uncomfortable. Breaking things down into smaller parts sometimes helps provide clarity. What specific aspect feels most confusing right now?"
    elif re.search(r"\b(hello|hi)\b", text):
        # Whole-word match so words like "this" don't trigger the greeting branch
        return "Hello! I'm Nova, here to support your emotional wellbeing. How are you feeling today?"
    else:
        # Return a random response if no keywords match
        return random.choice(MOCK_RESPONSES)
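# Illustrative routing examples for select_mock_response (comments only, not executed):
#   select_mock_response("I'm anxious about my exam")  -> breathing-exercise reply
#   select_mock_response("hello")                       -> Nova greeting
#   select_mock_response("tell me something")           -> random MOCK_RESPONSES entry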
def generate_response(prompt):
    """Generate a response using our mock system."""
    logger.info(f"Generating response for: {prompt[:30]}...")
    start_time = time.time()

    # Get appropriate mock response
    response = select_mock_response(prompt)

    # Simulate a brief delay for realism (but not too long)
    time.sleep(0.5)

    logger.info(f"Response generated in {time.time() - start_time:.2f} seconds")
    return response
# API endpoint that mimics your current backend API
def chat_api(message):
    """API endpoint that returns a response in the same format as your backend."""
    response_text = generate_response(message)

    # Create a response format similar to your current backend
    response = {
        "status": "ok",
        "data": {
            "response": response_text,
            "emotions": [{"label": "neutral", "score": 1.0}],  # Simplified emotions
            "triggers": [{"label": "unknown", "score": 1.0}],  # Simplified triggers
            "actions": ["Check in with yourself"],
            "guidance": [
                {"message": "How are you feeling right now?", "confidence": 0.9}
            ],
            "timestamp": None  # Will be filled by your backend
        },
        "meta": {
            "model_used": "nova-rule-based",
            "debug": None
        }
    }
    return json.dumps(response)
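# For reference, chat_api returns the dict above serialized as a JSON string,
# roughly: '{"status": "ok", "data": {"response": "...", ...}, "meta": {...}}'.
# The gr.JSON output in the "API" tab displays it (most Gradio versions parse a
# JSON string handed to gr.JSON; if yours does not, return the dict instead).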
# Create Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown(f"# NOVA Rule-Based Backend\n**Note:** Using reliable rule-based responses for faster performance")

    with gr.Tab("Chat"):
        with gr.Row():
            with gr.Column():
                message_input = gr.Textbox(
                    label="Your message",
                    placeholder="Type your message here...",
                    lines=2
                )
                submit_btn = gr.Button("Send")
            with gr.Column():
                output = gr.Textbox(label="Response")

        submit_btn.click(generate_response, inputs=message_input, outputs=output)

    with gr.Tab("API"):
        gr.Markdown(f"""
## API Endpoint

Send POST requests to: `https://ram-n-nova-llm-backend.hf.space/api/predict`

**Request format:**
```json
{{
    "data": ["Your message here"]
}}
```

**Response format:**
```json
{{
    "data": "{{\\\"status\\\": \\\"ok\\\", \\\"data\\\": {{\\\"response\\\": \\\"Generated response\\\", ...}}}}"
}}
```
""")

        api_input = gr.Textbox(label="Test API Input", placeholder="Type your message here...")
        api_output = gr.JSON(label="API Response")
        api_btn = gr.Button("Test API")

        api_btn.click(chat_api, inputs=api_input, outputs=api_output)
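# ---------------------------------------------------------------------------
# Illustrative client call for the endpoint documented in the "API" tab above.
# This is a sketch only and is not executed by the Space: the exact route and
# whether Gradio wraps outputs in a list depend on the Gradio version serving
# this app, so verify both before relying on it.
#
#   import requests, json
#
#   resp = requests.post(
#       "https://ram-n-nova-llm-backend.hf.space/api/predict",
#       json={"data": ["I'm feeling sad today"]},
#       timeout=30,
#   )
#   payload = resp.json()["data"]
#   if isinstance(payload, list):        # some Gradio versions return a list of outputs
#       payload = payload[0]
#   inner = json.loads(payload)          # chat_api serializes its dict to a JSON string
#   print(inner["data"]["response"])
# ---------------------------------------------------------------------------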
# Direct API endpoint interface (note: this Interface is defined but not
# launched here; only `demo` below is served)
interface = gr.Interface(
    fn=chat_api,
    inputs="text",
    outputs="json",
    title="NOVA API Endpoint",
    description="Send a message to get a response",
    examples=["I'm feeling sad today", "I'm anxious about my test tomorrow"],
)
# Launch the app
demo.launch()