import streamlit as st
from transformers import pipeline
import google.generativeai as genai
import json
import random

# Load language configurations from JSON
with open('languages_config.json', 'r', encoding='utf-8') as f:
    LANGUAGES = json.load(f)['LANGUAGES']
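# Assumed structure of languages_config.json (illustrative only, inferred from
# how LANGUAGES is used further down; the real file may contain more languages
# and keys):
# {
#   "LANGUAGES": {
#     "English": {
#       "title": "...", "description": "...", "input_placeholder": "...",
#       "detecting_emotion": "...", "emotions_header": "...",
#       "generating_response": "...", "response_header": "..."
#     }
#   }
# }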

# Load the JSON data for emotion templates
with open('emotion_templates.json', 'r', encoding='utf-8') as f:
    data = json.load(f)
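# Assumed structure of emotion_templates.json (illustrative only, inferred from
# create_prompt below): each emotion label maps to a list of template strings
# containing placeholders such as "[topic]" or "[person]".
# {
#   "emotion_templates": {
#     "joy": ["Write a short, upbeat message about [topic].", "..."],
#     "anger": ["..."]
#   }
# }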

# Configure Gemini. Do not hardcode API keys; on Streamlit Cloud use st.secrets,
# and for local testing add GEMINI_API_KEY to .streamlit/secrets.toml.
genai.configure(api_key=st.secrets["GEMINI_API_KEY"])
model = genai.GenerativeModel('gemini-2.0-flash')


# Use st.cache_resource to cache the loaded Hugging Face models.
# This prevents reloading the model every time the app reruns (e.g., on user input).
@st.cache_resource
def load_emotion_classifier(model_name: str):
    """Loads and caches the Hugging Face emotion classifier pipeline."""
    try:
        with st.spinner(f"Loading emotion detection model: {model_name}..."):
            classifier = pipeline("text-classification", model=model_name)
        st.success(f"Model {model_name} loaded!")
        return classifier
    except Exception as e:
        st.error(f"Error loading model {model_name}: {e}")
        return None
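
# For reference (illustrative values): for a single input string the pipeline
# above returns a list of {'label', 'score'} dicts, e.g.
#   [{'label': 'joy', 'score': 0.93}]
# get_ai_response() below iterates over this list to find the dominant emotion.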


def generate_text(prompt, context=""):
    """
    Generates text using the Gemini model.
    """
    try:
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        print(f"Error generating text: {e}")
        return "I am sorry, I encountered an error while generating the text."


def create_prompt(emotion, topic=None):
    """
    Builds a prompt by choosing a random template for the given emotion,
    filling in the topic, and adding generation instructions.
    """
    templates = data["emotion_templates"][emotion]
    prompt = random.choice(templates)
    if topic:
        # Replace the various topic placeholders in the template
        placeholders = ["[topic/person]", "[topic]", "[person]", "[object]", "[outcome]"]
        for placeholder in placeholders:
            prompt = prompt.replace(placeholder, topic)
        # Prepend generation instructions and append a topic section
        instructions = "Make the generated text in the same language as the topic.\n"
        instructions += "Make the generated text short.\n"
        topic_section = "\n## topic\n" + topic
        prompt = instructions + prompt + topic_section
    return prompt
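
# Example (illustrative, with a hypothetical template): create_prompt("joy", "my new job")
# could return something like:
#   "Make the generated text in the same language as the topic.\n"
#   "Make the generated text short.\n"
#   "Write a short, upbeat message about my new job.\n## topic\nmy new job"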


# 2. Conversational Agent Logic
def get_ai_response(user_input, emotion_predictions):
    """Generates AI response based on user input and detected emotions."""
    dominant_emotion = None
    max_score = 0
    responses = None
    for prediction in emotion_predictions:
        if prediction['score'] > max_score:
            max_score = prediction['score']
            dominant_emotion = prediction['label']
    if dominant_emotion:  # Ensure an emotion was detected
        prompt_text = create_prompt(dominant_emotion, user_input)
        responses = generate_text(prompt_text)
    else:
        responses = "I couldn't clearly detect a dominant emotion from your input."
    # Handle cases where no specific emotion is clear or generation failed
    if responses is None:
        return "I am sorry, I couldn't generate a response based on the detected emotion."
    else:
        return responses


# 3. Streamlit Frontend
def main():
    st.set_page_config(page_title="Emotion-Aware Chatbot", layout="centered")

    # --- Sidebar for Language and Model Selection ---
    with st.sidebar:
        st.header("Settings")

        # Language Selection
        selected_language = st.selectbox(
            "Select Interface Language",
            list(LANGUAGES.keys()),
            index=0  # Default to English
        )

        # Model Selection
        model_options = {
            "Multilingual GoEmotions (v1.0)": "AnasAlokla/multilingual_go_emotions",
            "Multilingual GoEmotions (v1.1)": "AnasAlokla/multilingual_go_emotions_V1.1"
        }
        selected_model_display_name = st.selectbox(
            "Select Emotion Detection Model",
            list(model_options.keys())
        )
        selected_model_path = model_options[selected_model_display_name]

    # Load the selected emotion classifier
    emotion_classifier = load_emotion_classifier(selected_model_path)
    if emotion_classifier is None:
        st.error("Emotion detection model could not be loaded. Please check your internet connection or try again.")
        return  # Stop execution if model didn't load

    # --- Main Content Area ---
    # Display Image
    st.image('chatBot_image.jpg', caption="Emotion-Aware Chatbot", use_column_width=True)

    # Set page title and header based on selected language
    st.title(LANGUAGES[selected_language]['title'])
    st.write(LANGUAGES[selected_language]['description'])

    # Input Text Box
    user_input = st.text_input(
        LANGUAGES[selected_language]['input_placeholder'],
        "",
        key="user_input_text"  # Added a key for better re-rendering behavior
    )

    if user_input:
        if emotion_classifier:  # Proceed only if model is loaded
            # Emotion Detection
            with st.spinner(LANGUAGES[selected_language]['detecting_emotion']):
                emotion_predictions = emotion_classifier(user_input)

            # Display Emotions
            st.subheader(LANGUAGES[selected_language]['emotions_header'])
            for prediction in emotion_predictions:
                st.write(f"- {prediction['label']}: {prediction['score']:.2f}")

            # Get AI Response
            with st.spinner(LANGUAGES[selected_language]['generating_response']):
                ai_response = get_ai_response(user_input, emotion_predictions)

            # Display AI Response
            st.subheader(LANGUAGES[selected_language]['response_header'])
            st.info(ai_response)  # Using st.info for a distinct display
        else:
            st.warning("Cannot process input because the emotion detection model failed to load.")


# Run the main function
if __name__ == "__main__":
    main()