"""Streamlit chatbot that detects the emotion of the user's message with a
multilingual GoEmotions classifier and replies with a Gemini-generated,
emotion-aware response."""

import streamlit as st
from transformers import pipeline
import google.generativeai as genai
import json
import random
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Load language configurations from JSON
with open('languages_config.json', 'r', encoding='utf-8') as f:
    LANGUAGES = json.load(f)['LANGUAGES']

# Load the JSON data for emotion templates
with open('emotion_templates.json', 'r', encoding='utf-8') as f:
    data = json.load(f)

# Configure Gemini API
gemini_api_key = os.getenv("GEMINI_API_KEY")
if not gemini_api_key:
    st.error("GEMINI_API_KEY not found in environment variables. Please set it in your .env file.")
    st.stop()
genai.configure(api_key=gemini_api_key)
model = genai.GenerativeModel('gemini-2.0-flash')

# Configure Hugging Face authentication (optional, for private models or rate limiting)
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if hf_token:
    os.environ["HF_TOKEN"] = hf_token  # env var recognized by huggingface_hub

# Available emotion detection models
EMOTION_MODELS = {
    "AnasAlokla/multilingual_go_emotions": "Multilingual Go Emotions (Original)",
    "AnasAlokla/multilingual_go_emotions_V1.1": "Multilingual Go Emotions (V1.1)"
}


def generate_text(prompt, context=""):
    """Generate text using the Gemini model."""
    try:
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        print(f"Error generating text: {e}")
        return "I am sorry, I encountered an error while generating the text."


def create_prompt(emotion, topic=None):
    """Choose a random prompt template for the emotion and fill in the topic."""
    templates = data["emotion_templates"][emotion]
    prompt = random.choice(templates)
    if topic:
        # Replace the various placeholders in the template with the topic
        placeholders = ["[topic/person]", "[topic]", "[person]", "[object]", "[outcome]"]
        for placeholder in placeholders:
            prompt = prompt.replace(placeholder, topic)
        # Prepend generation instructions and append the topic section
        instruction_prefix = (
            "Make the generated text in the same language as the topic.\n"
            "Make the generated text short.\n"
        )
        topic_suffix = "## topic\n" + topic
        prompt = instruction_prefix + prompt + topic_suffix
    return prompt


@st.cache_resource
def load_emotion_classifier(model_name):
    """Load and cache the emotion classifier model."""
    try:
        # Pass the HF token if available (only needed for private models or higher rate limits)
        return pipeline("text-classification", model=model_name, token=hf_token)
    except Exception as e:
        st.error(f"Error loading model {model_name}: {str(e)}")
        return None


def get_ai_response(user_input, emotion_predictions):
    """Generate an AI response based on the user input and the detected emotions."""
    # Find the emotion with the highest score
    dominant_emotion = None
    max_score = 0.0
    for prediction in emotion_predictions:
        if prediction['score'] > max_score:
            max_score = prediction['score']
            dominant_emotion = prediction['label']

    # Handle the case where no emotion could be detected
    if dominant_emotion is None:
        return "Sorry, I could not detect a clear emotion in your message."

    prompt_text = create_prompt(dominant_emotion, user_input)
    return generate_text(prompt_text)


def main():
    # Sidebar configuration
    st.sidebar.header("Configuration")

    # Language selection
    selected_language = st.sidebar.selectbox(
        "Select Interface Language",
        list(LANGUAGES.keys()),
        index=0  # Default to English
    )

    # Model selection
    selected_model_key = st.sidebar.selectbox(
        "Select Emotion Detection Model",
        list(EMOTION_MODELS.keys()),
        format_func=lambda x: EMOTION_MODELS[x],
        index=0  # Default to the first model
    )

    # Load the selected emotion classifier
    emotion_classifier = load_emotion_classifier(selected_model_key)

    # Check if the model loaded successfully
    if emotion_classifier is None:
        st.error("Failed to load the selected emotion detection model. Please try again or select a different model.")
        return

    # Display selected model info
    st.sidebar.info(f"Current Model: {EMOTION_MODELS[selected_model_key]}")

    # Display image
    st.image('chatBot_image.jpg')

    # Set the page title based on the selected language
    st.title(LANGUAGES[selected_language]['title'])

    # Input text box
    user_input = st.text_input(
        LANGUAGES[selected_language]['input_placeholder'],
        ""
    )

    if user_input:
        # Emotion detection (top_k=None returns scores for all emotions, not just the top one)
        with st.spinner("Analyzing emotions..."):
            emotion_predictions = emotion_classifier(user_input, top_k=None)

        # Display detected emotions
        st.subheader(LANGUAGES[selected_language]['emotions_header'])
        for prediction in emotion_predictions:
            st.write(f"- {prediction['label']}: {prediction['score']:.2f}")

        # Generate the AI response
        with st.spinner("Generating response..."):
            ai_response = get_ai_response(user_input, emotion_predictions)

        # Display the AI response
        st.subheader(LANGUAGES[selected_language]['response_header'])
        st.write(ai_response)


# Run the main function
if __name__ == "__main__":
    main()