Spaces:
Runtime error
Runtime error
File size: 4,617 Bytes
903881b 86815c1 903881b 86815c1 903881b 86815c1 903881b 86815c1 de54077 86815c1 903881b 86815c1 903881b 86815c1 903881b c191194 903881b 86815c1 c191194 86815c1 903881b c191194 528124c 903881b c191194 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 |
import os
import uuid

import google.generativeai as genai
import streamlit as st
from PIL import Image
# Persona system prompts, keyed by the single-digit selector the user types.
_PROMPT_TEXTS = (
    "AI Planner System Prompt: Your task is to assist in the development of a book.",
    "AI Companion System Prompt: Your role is to offer companionship and conversation.",
    "AI Tutor System Prompt: You're here to provide educational assistance.",
    "AI Advisor System Prompt: Provide guidance on financial planning.",
    "AI Fitness Coach Prompt: Offer fitness and health advice.",
)
system_prompts = {str(index): text for index, text in enumerate(_PROMPT_TEXTS, start=1)}
# Placeholder image for each prompt digit, loaded once at import time.
# A missing file is skipped instead of crashing the whole app on startup:
# generate_response() already guards lookups with `digit in placeholder_images`,
# so an absent entry simply means no image is shown for that prompt.
placeholder_images = {}
for _digit in ('1', '2', '3', '4', '5'):
    try:
        placeholder_images[_digit] = Image.open(f'data/image{_digit}.jpg')
    except FileNotFoundError:
        # Image is optional decoration; the prompt still works without it.
        pass
# Configure the Gemini API key.
# SECURITY: this key was previously hard-coded and committed to a public repo,
# so it must be treated as compromised and revoked. Prefer supplying the key
# via the GOOGLE_API_KEY environment variable (or st.secrets); the literal
# below is kept only as a backward-compatible fallback until rotation.
api_key = os.environ.get("GOOGLE_API_KEY", "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM")
genai.configure(api_key=api_key)
# Sampling parameters for the model: fairly creative output (high temperature)
# with a generous response-length budget.
generation_config = genai.GenerationConfig(
    max_output_tokens=3000,
    temperature=0.9,
)
# Safety settings: explicitly disable filtering for every harm category the
# API supports, so no response is blocked client-side.
_HARM_CATEGORIES = (
    "HARM_CATEGORY_DANGEROUS_CONTENT",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_HARASSMENT",
)
safety_settings = [
    {"category": category, "threshold": "BLOCK_NONE"}
    for category in _HARM_CATEGORIES
]
# Seed per-session defaults on the first run of this script; Streamlit reruns
# the whole file on every interaction, so existing keys are left untouched.
for _key, _default in (('chat_history', []), ('input_sequence', '')):
    if _key not in st.session_state:
        st.session_state[_key] = _default

st.title("Gemini Chatbot")
def display_chat_history():
    """Render every stored chat turn as 'Role: text' markdown lines."""
    for message in st.session_state['chat_history']:
        role = message['role'].title()
        text = message['parts'][0]['text']
        st.markdown(f"{role}: {text}")
def clear_conversation():
    """Reset the conversation by replacing the stored history with an empty list."""
    # Attribute access on st.session_state is equivalent to key access.
    st.session_state.chat_history = []
# Function to process the input sequence and generate responses
def process_input_sequence():
    """Send-button callback: record the user's message, then generate one
    response per digit in the input sequence using that digit's persona prompt.

    Reads st.session_state['user_input'] and st.session_state['input_sequence'];
    clears the text area afterwards so the message is not resent on the next click.
    """
    # BUG FIX: previously the typed message was cleared without ever being
    # added to the history, so the model never saw it. Store it first.
    user_message = st.session_state.get('user_input', '')
    if user_message:
        st.session_state['chat_history'].append(
            {"role": "user", "parts": [{"text": user_message}]}
        )
    input_sequence = st.session_state['input_sequence']
    # Clear current user input to prevent reusing it for multiple responses
    st.session_state.user_input = ''
    for digit in input_sequence:
        # Unknown digits fall back to a generic prompt rather than erroring.
        system_prompt = system_prompts.get(digit, "Default system prompt")
        # Generate and display the response for the current system prompt
        generate_response(system_prompt, digit)
# Function to generate and display response for the given system prompt
def generate_response(system_prompt, digit):
    """Generate one model reply for *system_prompt*, append it to the session
    chat history, and re-render the history.

    digit: single-character key ('1'-'5') used to pick a placeholder image;
    no image is shown when the key is absent from placeholder_images.
    """
    # Display the corresponding image for the current digit, if available
    if digit in placeholder_images:
        st.image(placeholder_images[digit], caption=f"Image for Prompt {digit}")
    # Temporarily modify chat history for generating response — the copy means
    # the system prompt itself is never persisted into session history.
    temp_chat_history = st.session_state['chat_history'].copy()
    temp_chat_history.append({"role": "model", "parts": [{"text": system_prompt}]})
    # Flatten history + prompt into one newline-joined blob and send it as a
    # single user turn. NOTE(review): role information is lost here, so the
    # model does not see a true multi-turn conversation — confirm intended.
    chat_history_str = "\n".join([entry['parts'][0]['text'] for entry in temp_chat_history])
    # A fresh model object is built on every call; config comes from module level.
    model = genai.GenerativeModel(
        model_name='gemini-pro',
        generation_config=generation_config,
        safety_settings=safety_settings
    )
    response = model.generate_content([{"role": "user", "parts": [{"text": chat_history_str}]}])
    # Check for valid response parts and update chat history accordingly
    if hasattr(response, 'parts') and response.parts:
        response_text = response.parts[0].text
        st.session_state['chat_history'].append({"role": "model", "parts":[{"text": response_text}]})
    else:
        # Empty/blocked generation — surface a diagnostic instead of text.
        handle_invalid_response(response)
    display_chat_history()
def handle_invalid_response(response):
    """Show an error explaining why *response* produced no usable text.

    BUG FIX: the original code inspected `response.candidate`, which does not
    exist on the SDK's GenerateContentResponse — it exposes `response.candidates`
    (a list) — so the safety-ratings branch could never trigger.
    """
    candidates = getattr(response, 'candidates', None)
    if candidates and getattr(candidates[0], 'safety_ratings', None):
        safety_issues = candidates[0].safety_ratings
        st.error(f"Response was blocked due to safety issues: {safety_issues}")
    else:
        st.error("No valid response was returned. Please try again with different input.")
# User input text area — value lives in st.session_state['user_input'];
# process_input_sequence() reads and then clears it.
user_input = st.text_area("Enter your message here:", value="", key="user_input")
# Input sequence for system prompts — each digit selects one persona prompt
# (see system_prompts); processed left to right on Send.
input_sequence = st.text_input("Enter your input sequence (e.g., 12345):", value="", key="input_sequence")
# Send message button — callback runs before the script reruns.
send_button = st.button("Send", on_click=process_input_sequence)
# Clear conversation button — wipes st.session_state['chat_history'].
clear_button = st.button("Clear Conversation", on_click=clear_conversation)
|