# NOTE(review): the following three lines were Hugging Face Spaces page-scrape
# artifacts ("Spaces:", "Runtime error" x2), not Python source; they are kept
# here as a comment so the module parses.
#   Spaces: / Runtime error / Runtime error
import os
import uuid

import google.generativeai as genai
import streamlit as st
from PIL import Image
# Persona system prompts, one per digit of the user's input sequence.
# Unknown digits fall back to a default prompt in process_input_sequence().
system_prompts = {
    '1': "AI Planner System Prompt: Your task is to assist in the development of a book.",
    '2': "AI Companion System Prompt: Your role is to offer companionship and conversation.",
    '3': "AI Tutor System Prompt: You're here to provide educational assistance.",
    '4': "AI Advisor System Prompt: Provide guidance on financial planning.",
    '5': "AI Fitness Coach Prompt: Offer fitness and health advice.",
}
# One illustrative image per prompt digit, loaded eagerly at startup from
# data/image1.jpg .. data/image5.jpg (paths identical to the originals).
placeholder_images = {
    str(idx): Image.open(f'data/image{idx}.jpg') for idx in range(1, 6)
}
# --- API key & generation configuration ---
# SECURITY FIX: an API key was previously committed in this file in plain
# text.  Any key that has appeared in source control must be treated as
# leaked and revoked.  Read the key from the environment instead.
api_key = os.environ.get("GOOGLE_API_KEY", "")
if not api_key:
    # Surface the misconfiguration immediately rather than failing later
    # with an opaque authentication error from the API.
    st.warning("GOOGLE_API_KEY is not set; Gemini calls will fail.")
genai.configure(api_key=api_key)

# Sampling parameters shared by every generation call.
generation_config = genai.GenerationConfig(
    temperature=0.9,         # high temperature -> more varied replies
    max_output_tokens=3000,  # upper bound on reply length
)
# Disable category-level moderation so replies are never withheld by the
# built-in filters (each category is set to BLOCK_NONE).
_UNBLOCKED_CATEGORIES = (
    "HARM_CATEGORY_DANGEROUS_CONTENT",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_HARASSMENT",
)
safety_settings = [
    {"category": category, "threshold": "BLOCK_NONE"}
    for category in _UNBLOCKED_CATEGORIES
]
# --- Session-state bootstrap ---
# Streamlit reruns this script on every interaction, so values that must
# survive reruns live in st.session_state and are seeded only once.
for _key, _default in (('chat_history', []), ('input_sequence', '')):
    if _key not in st.session_state:
        st.session_state[_key] = _default

st.title("Gemini Chatbot")
# Display chat history | |
def display_chat_history():
    """Render every stored turn as a '<Role>: <text>' markdown line."""
    for turn in st.session_state['chat_history']:
        role = turn['role'].title()
        text = turn['parts'][0]['text']
        st.markdown(f"{role}: {text}")
# Function to clear conversation history | |
def clear_conversation():
    """Button callback: drop the entire stored conversation transcript."""
    st.session_state['chat_history'] = []
# Function to process the input sequence and generate responses | |
def process_input_sequence():
    """Send-button callback.

    Records the user's typed message in the chat history, then walks the
    digit sequence and generates one model response per digit's persona
    prompt (unknown digits fall back to a default prompt).
    """
    input_sequence = st.session_state['input_sequence']
    user_message = st.session_state.get('user_input', '')
    # BUG FIX: previously the user's message was cleared below without ever
    # being appended to the history, so the model never saw it.  Record it
    # first so generate_response() includes it in the flattened transcript.
    if user_message:
        st.session_state['chat_history'].append(
            {"role": "user", "parts": [{"text": user_message}]}
        )
    # Clear the input box so the same text isn't resent on the next click.
    st.session_state.user_input = ''
    for digit in input_sequence:
        system_prompt = system_prompts.get(digit, "Default system prompt")
        # Generate and display the response for this digit's persona.
        generate_response(system_prompt, digit)
# Function to generate and display response for the given system prompt | |
def generate_response(system_prompt, digit):
    """Produce one Gemini reply for *system_prompt* and re-render the chat.

    Shows the digit's placeholder image when one is registered, appends the
    system prompt to a throwaway copy of the history, flattens that copy
    into a single user turn, and requests a completion.  A reply with text
    parts is stored in the session history; anything else is handed to
    handle_invalid_response().
    """
    # Illustrate which persona is answering, when an image exists for it.
    if digit in placeholder_images:
        st.image(placeholder_images[digit], caption=f"Image for Prompt {digit}")

    # Copy so the system prompt itself never lands in the persisted history.
    scratch = st.session_state['chat_history'].copy()
    scratch.append({"role": "model", "parts": [{"text": system_prompt}]})
    flattened = "\n".join(turn['parts'][0]['text'] for turn in scratch)

    model = genai.GenerativeModel(
        model_name='gemini-pro',
        generation_config=generation_config,
        safety_settings=safety_settings
    )
    response = model.generate_content(
        [{"role": "user", "parts": [{"text": flattened}]}]
    )

    # Store the reply only when the response actually carries text parts.
    if hasattr(response, 'parts') and response.parts:
        reply_text = response.parts[0].text
        st.session_state['chat_history'].append(
            {"role": "model", "parts": [{"text": reply_text}]}
        )
    else:
        handle_invalid_response(response)
    display_chat_history()
def handle_invalid_response(response):
    """Show an st.error explaining why no text came back from the model.

    BUG FIX: the original checked ``response.candidate`` (singular), an
    attribute google-generativeai responses do not expose — the API exposes
    a ``candidates`` list — so the safety-ratings branch was unreachable
    and every failure showed the generic message.
    """
    candidates = getattr(response, 'candidates', None)
    if candidates and hasattr(candidates[0], 'safety_ratings'):
        safety_issues = candidates[0].safety_ratings
        st.error(f"Response was blocked due to safety issues: {safety_issues}")
    else:
        st.error("No valid response was returned. Please try again with different input.")
# --- Input widgets ---
# Free-form message box; its value persists under st.session_state['user_input'].
user_input = st.text_area("Enter your message here:", value="", key="user_input")
# Digit string choosing which persona prompts to run, in order.
input_sequence = st.text_input("Enter your input sequence (e.g., 12345):", value="", key="input_sequence")
# Action buttons, each wired to its callback.
send_button = st.button("Send", on_click=process_input_sequence)
clear_button = st.button("Clear Conversation", on_click=clear_conversation)