# GrokAgenticWorkforce / pages/11_Gemini-Cyclic.py
import streamlit as st
import google.generativeai as genai
from PIL import Image
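# This Streamlit page drives a "cyclic" Gemini chat: the user enters one
# message plus a digit sequence (e.g. "12345"), and for each digit the
# matching system prompt and placeholder image are used to generate a
# response, which is appended to the shared chat history.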
# Define a dictionary of system prompts mapped to numbers
system_prompts = {
    '1': "AI Planner System Prompt: Your task is to assist in the development of a book.",
    '2': "AI Companion System Prompt: Your role is to offer companionship and conversation.",
    '3': "AI Tutor System Prompt: You're here to provide educational assistance.",
    '4': "AI Advisor System Prompt: Provide guidance on financial planning.",
    '5': "AI Fitness Coach Prompt: Offer fitness and health advice.",
}
placeholder_images = {
    '1': Image.open('data/image1.jpg'),
    '2': Image.open('data/image2.jpg'),
    '3': Image.open('data/image3.jpg'),
    '4': Image.open('data/image4.jpg'),
    '5': Image.open('data/image5.jpg'),
}
# Set your API key here, or load it from st.secrets / an environment
# variable rather than hard-coding it in the source.
api_key = "YOUR_GOOGLE_API_KEY"  # placeholder; do not commit real keys
genai.configure(api_key=api_key)
# Configure the generative AI model
generation_config = genai.GenerationConfig(
    temperature=0.9,
    max_output_tokens=3000,
)
# Safety settings configuration
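# Every harm category is set to BLOCK_NONE, which disables Gemini's
# built-in content filtering for this app.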
safety_settings = [
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_NONE",
    },
]
# Initialize session state for chat history and input sequence
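# st.session_state persists across Streamlit reruns, so the conversation
# survives each button click and widget interaction.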
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []
if 'input_sequence' not in st.session_state:
    st.session_state['input_sequence'] = ''
st.title("Gemini Chatbot")
# Display chat history
def display_chat_history():
    for entry in st.session_state['chat_history']:
        st.markdown(f"{entry['role'].title()}: {entry['parts'][0]['text']}")
# Function to clear conversation history
def clear_conversation():
    st.session_state['chat_history'] = []
# Function to process the input sequence and generate responses
def process_input_sequence():
    input_sequence = st.session_state.input_sequence
    for digit in input_sequence:
        system_prompt = system_prompts.get(digit, "Default system prompt")
        # Display the corresponding image for the current digit
        if digit in placeholder_images:
            st.image(placeholder_images[digit], caption=f"Image for Prompt {digit}")
        send_message(system_prompt)
    # Clear the user input only after every digit has been processed,
    # so each prompt in the sequence sees the same message
    st.session_state.user_input = ''
# Send message function with system prompt
def send_message(system_prompt):
    user_input = st.session_state.user_input
    if user_input:
        # Flatten the existing chat history into plain-text prompts
        prompts = [entry['parts'][0]['text'] for entry in st.session_state['chat_history']]
        prompts.append(user_input)
        # Combine the system prompt with the chat history and the new message
        chat_history_str = system_prompt + "\n" + "\n".join(prompts)
        model = genai.GenerativeModel(
            model_name='gemini-pro',
            generation_config=generation_config,
            safety_settings=safety_settings
        )
        # Record the user's message so it appears in the displayed history
        st.session_state['chat_history'].append({"role": "user", "parts": [{"text": user_input}]})
        response = model.generate_content([{"role": "user", "parts": [{"text": chat_history_str}]}])
        # response.text raises if the prompt was blocked, so guard the access
        try:
            response_text = response.text
        except ValueError:
            response_text = "No response text found."
        if response_text:
            st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})
# User input text area
user_input = st.text_area("Enter your message here:", value="", key="user_input")
# Input sequence for system prompts
input_sequence = st.text_input("Enter your input sequence (e.g., 12345):", value="", key="input_sequence")
# Send message button
send_button = st.button("Send", on_click=process_input_sequence)
# Clear conversation button
clear_button = st.button("Clear Conversation", on_click=clear_conversation)
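# Render the conversation in the main body so the history persists across reruns
display_chat_history()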