import json
import os
import random

import google.generativeai as genai
import streamlit as st
from dotenv import load_dotenv
from transformers import pipeline

# Load environment variables
load_dotenv()

# Load language configurations from JSON
with open('languages_config.json', 'r', encoding='utf-8') as f:
    LANGUAGES = json.load(f)['LANGUAGES']

# Load the JSON data for emotion templates
with open('emotion_templates.json', 'r', encoding='utf-8') as f:
    data = json.load(f)

# Configure Gemini API
gemini_api_key = os.getenv("GEMINI_API_KEY")
if not gemini_api_key:
    st.error("GEMINI_API_KEY not found in environment variables. Please set it in your .env file.")
    st.stop()
genai.configure(api_key=gemini_api_key)
model = genai.GenerativeModel('gemini-2.0-flash')

# Configure Hugging Face API token (optional, for private models or higher rate limits)
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if hf_token:
    os.environ["HUGGINGFACE_HUB_TOKEN"] = hf_token

# Available emotion detection models
EMOTION_MODELS = {
    "AnasAlokla/multilingual_go_emotions": "Multilingual Go Emotions (Original)",
    "AnasAlokla/multilingual_go_emotions_V1.1": "Multilingual Go Emotions (V1.1)"
}


def generate_text(prompt):
    """Generate a response with the Gemini model."""
    try:
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        print(f"Error generating text: {e}")
        return "I am sorry, I encountered an error while generating the text."


def create_prompt(emotion, topic=None):
    """Build a generation prompt from a random template for the given emotion."""
    templates = data["emotion_templates"][emotion]
    prompt = random.choice(templates)

    if topic:
        # Replace the various placeholders a template may contain
        placeholders = ["[topic/person]", "[topic]", "[person]", "[object]", "[outcome]"]
        for placeholder in placeholders:
            prompt = prompt.replace(placeholder, topic)

    # Prepend generation instructions and append the topic section
    instructions = (
        "Make the generated text in the same language as the topic.\n"
        "Make the generated text short.\n"
    )
    topic_section = "## topic\n" + topic if topic else ""
    return instructions + prompt + topic_section


@st.cache_resource
def load_emotion_classifier(model_name):
    """Load and cache the emotion classifier model."""
    try:
        # top_k=None returns scores for every emotion label, which the UI needs
        # for ranking; the HF token (if set) is passed for authentication.
        return pipeline(
            "text-classification",
            model=model_name,
            top_k=None,
            token=hf_token,
        )
    except Exception as e:
        st.error(f"Error loading model {model_name}: {str(e)}")
        return None


def get_ai_response(user_input, emotion_predictions):
    """Generate an AI response based on user input and the dominant detected emotion."""
    dominant_emotion = None
    max_score = 0.0

    # Find the highest-scoring emotion
    for prediction in emotion_predictions:
        if prediction['score'] > max_score:
            max_score = prediction['score']
            dominant_emotion = prediction['label']

    # Handle cases where no emotion was detected at all
    if dominant_emotion is None:
        return "Sorry, I could not detect a clear emotion in your message."

    # Generate the response once, for the dominant emotion only
    prompt_text = create_prompt(dominant_emotion, user_input)
    return generate_text(prompt_text)


def display_top_predictions(emotion_predictions, selected_language, num_predictions=10):
    """Display the top emotion predictions in the sidebar."""
    # Sort predictions by score in descending order and keep the top N
    sorted_predictions = sorted(emotion_predictions, key=lambda x: x['score'], reverse=True)
    top_predictions = sorted_predictions[:num_predictions]

    st.sidebar.markdown("---")
    st.sidebar.subheader("🎯 Top Emotion Predictions")

    for i, prediction in enumerate(top_predictions, 1):
        emotion = prediction['label']
        score = prediction['score']
        percentage = score * 100

        # Progress bar for a quick visual comparison of scores
        st.sidebar.markdown(f"**{i}. {emotion.title()}**")
        st.sidebar.progress(score)
        st.sidebar.markdown(f"Score: {percentage:.1f}%")

    st.sidebar.markdown("---")


def main():
    # Sidebar configuration
    st.sidebar.header("⚙️ Configuration")

    # Language selection
    selected_language = st.sidebar.selectbox(
        "🌐 Select Interface Language",
        list(LANGUAGES.keys()),
        index=0  # Default to English
    )

    # Model selection
    selected_model_key = st.sidebar.selectbox(
        "🤖 Select Emotion Detection Model",
        list(EMOTION_MODELS.keys()),
        format_func=lambda x: EMOTION_MODELS[x],
        index=0  # Default to first model
    )

    # Number of predictions to show in the sidebar
    num_predictions = st.sidebar.slider(
        "📊 Number of predictions to show",
        min_value=5,
        max_value=15,
        value=10,
        step=1
    )

    # Load the selected emotion classifier
    emotion_classifier = load_emotion_classifier(selected_model_key)

    # Check if the model loaded successfully
    if emotion_classifier is None:
        st.error("Failed to load the selected emotion detection model. Please try again or select a different model.")
        return

    # Display selected model info
    st.sidebar.success(f"✅ Current Model: {EMOTION_MODELS[selected_model_key]}")

    # Display image
    st.image('chatBot_image.jpg', channels='RGB')

    # Set page title and header based on the selected language
    st.title(LANGUAGES[selected_language]['title'])
    st.markdown("### 💬 Enter your text to analyze emotions and get AI response")

    # Input text box
    user_input = st.text_area(
        LANGUAGES[selected_language]['input_placeholder'],
        "",
        height=100,
        help="Type your message here to analyze emotions"
    )

    if user_input:
        # Emotion detection
        with st.spinner("Analyzing emotions..."):
            emotion_predictions = emotion_classifier(user_input)

        # Some transformers versions wrap single-input results in an extra list;
        # unwrap so downstream code always sees a flat list of {label, score} dicts.
        if emotion_predictions and isinstance(emotion_predictions[0], list):
            emotion_predictions = emotion_predictions[0]

        # Display top predictions in the sidebar
        display_top_predictions(emotion_predictions, selected_language, num_predictions)

        # Display the top 5 emotions in the main area
        st.subheader(LANGUAGES[selected_language]['emotions_header'])
        top_5_emotions = sorted(emotion_predictions, key=lambda x: x['score'], reverse=True)[:5]

        # Create two columns for a more compact display
        col1, col2 = st.columns(2)
        for i, prediction in enumerate(top_5_emotions):
            emotion = prediction['label']
            score = prediction['score']
            percentage = score * 100
            with (col1 if i % 2 == 0 else col2):
                st.metric(
                    label=emotion.title(),
                    value=f"{percentage:.1f}%",
                    delta=None
                )

        # Get AI response
        with st.spinner("Generating response..."):
            ai_response = get_ai_response(user_input, emotion_predictions)

        # Display AI response
        st.subheader(LANGUAGES[selected_language]['response_header'])
        st.write(ai_response)


# Run the main function
if __name__ == "__main__":
    main()
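# ---------------------------------------------------------------------------
# Illustrative sketch (assumption, not shipped with this script): the minimal
# shape of the two JSON files loaded above, inferred from how the code reads
# them. Real files may contain more languages, labels, and templates.
#
# languages_config.json
#   {
#     "LANGUAGES": {
#       "English": {
#         "title": "...",
#         "input_placeholder": "...",
#         "emotions_header": "...",
#         "response_header": "..."
#       }
#     }
#   }
#
# emotion_templates.json (labels follow the model's GoEmotions-style taxonomy)
#   {
#     "emotion_templates": {
#       "joy": ["Write a short reply celebrating [topic]."]
#     }
#   }
# ---------------------------------------------------------------------------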