Update pages/5_Gemini-Chat.py
pages/5_Gemini-Chat.py  CHANGED  (+17 -29)
@@ -8,7 +8,7 @@ import google.generativeai as genai
 from io import BytesIO
 
 # Set your API key
-api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
+api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"  # Replace this with your actual API key
 genai.configure(api_key=api_key)
 
 # Configure the generative AI model
@@ -39,9 +39,11 @@ safety_settings = [
 
 # Initialize session state
 if 'chat_history' not in st.session_state:
-    st.session_state['chat_history'] = []
+    st.session_state.chat_history = []
 if 'file_uploader_key' not in st.session_state:
-    st.session_state['file_uploader_key'] = str(uuid.uuid4())
+    st.session_state.file_uploader_key = str(uuid.uuid4())
+if 'uploaded_files' not in st.session_state:  # Initialize uploaded_files in session state
+    st.session_state.uploaded_files = []
 
 st.title("Gemini Chatbot")
 
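Note on the access-style change in this hunk: Streamlit's `st.session_state` supports both key access and attribute access, so the two spellings below are interchangeable. The snippet is an illustration of that equivalence, not part of the commit.

```python
import streamlit as st

# Both spellings address the same underlying state entry; the commit
# standardizes on the attribute style for initialization.
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []        # attribute access (after this commit)
    # st.session_state['chat_history'] = []   # equivalent key access (before)
```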
@@ -50,11 +52,6 @@
 **AI Planner System Prompt:** As the AI Planner, your primary task is to assist in the development of a coherent and engaging book. You will be responsible for organizing the overall structure, defining the plot or narrative, and outlining the chapters or sections. To accomplish this, you will need to use your understanding of storytelling principles and genre conventions, as well as any specific information provided by the user, to create a well-structured framework for the book.
 """)
 
-# System message to be included in the prompts sent to the AI model
-ai_planner_system_prompt = """
-AI Planner System Prompt: As the AI Planner, your primary task is to assist in the development of a coherent and engaging book. You will be responsible for organizing the overall structure, defining the plot or narrative, and outlining the chapters or sections. To accomplish this, you will need to use your understanding of storytelling principles and genre conventions, as well as any specific information provided by the user, to create a well-structured framework for the book.
-"""
-
 # Helper functions for image processing and chat history management
 def get_image_base64(image):
     image = image.convert("RGB")
@@ -64,11 +61,11 @@ def get_image_base64(image):
     return img_str
 
 def clear_conversation():
-    st.session_state['chat_history'] = []
-    st.session_state['file_uploader_key'] = str(uuid.uuid4())
+    st.session_state.chat_history = []
+    st.session_state.file_uploader_key = str(uuid.uuid4())
 
 def display_chat_history():
-    for entry in st.session_state['chat_history']:
+    for entry in st.session_state.chat_history:
         role = entry["role"]
         parts = entry["parts"][0]
         if 'text' in parts:
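The body of `get_image_base64` sits mostly outside this hunk; only the `convert("RGB")` line and the `return img_str` line are visible. For reference, a minimal sketch consistent with those lines could look like the following, where the JPEG buffer handling is an assumption rather than something shown in the diff.

```python
import base64
from io import BytesIO

from PIL import Image

def get_image_base64(image: Image.Image) -> str:
    # Convert to RGB, serialize as JPEG into memory, then base64-encode
    # so the image can be sent as a text-safe payload.
    image = image.convert("RGB")
    buffer = BytesIO()
    image.save(buffer, format="JPEG")
    img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return img_str
```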
@@ -80,7 +77,7 @@ def get_chat_history_str():
     chat_history_str = "\n".join(
         f"{entry['role'].title()}: {part['text']}" if 'text' in part
         else f"{entry['role'].title()}: (Image)"
-        for entry in st.session_state['chat_history']
+        for entry in st.session_state.chat_history
         for part in entry['parts']
     )
     return chat_history_str
@@ -88,31 +85,22 @@ def get_chat_history_str():
 # Modified send_message function
 def send_message():
     user_input = st.session_state.user_input
-
-    prompts = [ai_planner_system_prompt]  # Start with the AI Planner System Prompt
+    prompts = []
     prompt_parts = []
 
-    # Populate the prompts list with the existing chat history
-    for entry in st.session_state['chat_history']:
-        for part in entry['parts']:
-            if 'text' in part:
-                prompts.append(part['text'])
-            elif 'data' in part:
-                # Add the image in base64 format to prompt_parts for vision model
-                prompt_parts.append({"data": part['data'], "mime_type": "image/jpeg"})
-                prompts.append("[Image]")
-
     if user_input:
         prompts.append(user_input)
-        st.session_state['chat_history'].append({"role": "user", "parts": [{"text": user_input}]})
+        st.session_state.chat_history.append({"role": "user", "parts": [{"text": user_input}]})
         prompt_parts.append({"text": user_input})
 
+    # Handling uploaded files
+    uploaded_files = st.session_state.uploaded_files
     if uploaded_files:
         for uploaded_file in uploaded_files:
            base64_image = get_image_base64(Image.open(uploaded_file))
            prompts.append("[Image]")
            prompt_parts.append({"data": base64_image, "mime_type": "image/jpeg"})
-           st.session_state['chat_history'].append({
+           st.session_state.chat_history.append({
                "role": "user",
                "parts": [{"mime_type": uploaded_file.type, "data": base64_image}]
            })
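The model construction and call are below this hunk and unchanged by the commit, so they do not appear here. As a point of reference only, a sketch of what the call presumably looks like, assuming `generation_config` and `safety_settings` are defined earlier in the file and that a text model such as `gemini-pro` is used, is:

```python
import google.generativeai as genai

# Sketch only: the real call is outside the diff context shown here.
model = genai.GenerativeModel(model_name="gemini-pro",          # model name is an assumption
                              generation_config=generation_config,
                              safety_settings=safety_settings)
response = model.generate_content(prompts)                      # list of text prompts built above
response_text = response.text if hasattr(response, "text") else "No response text found."
```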
@@ -134,7 +122,7 @@ def send_message():
     response_text = response.text if hasattr(response, "text") else "No response text found."
 
     if response_text:
-        st.session_state['chat_history'].append({"role": "model", "parts":[{"text": response_text}]})
+        st.session_state.chat_history.append({"role": "model", "parts":[{"text": response_text}]})
         tts = gTTS(text=response_text, lang='en')
         tts_file = BytesIO()
         tts.write_to_fp(tts_file)
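The hunk shows the reply being synthesized with gTTS into a `BytesIO` buffer; the playback step is outside the visible context. A plausible continuation, where the `st.audio` call and the buffer rewind are my assumptions rather than lines from the diff, would be:

```python
from io import BytesIO

import streamlit as st
from gtts import gTTS

response_text = "Example reply text"       # placeholder; in the app this comes from the model
tts = gTTS(text=response_text, lang='en')
tts_file = BytesIO()
tts.write_to_fp(tts_file)
tts_file.seek(0)                           # rewind the buffer before handing it to the player
st.audio(tts_file, format="audio/mp3")     # assumed playback step, not shown in this hunk
```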
@@ -146,9 +134,9 @@ def send_message():
     st.session_state.file_uploader_key = str(uuid.uuid4())
     display_chat_history()
 
-# UI components
+# UI components for user input, file uploader, send and clear buttons
 user_input = st.text_area("Enter your message here:", value="", key="user_input")
-uploaded_files = st.file_uploader("Upload images:", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key=st.session_state.file_uploader_key)
+uploaded_files = st.file_uploader("Upload images:", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="file_uploader_key", on_change=lambda: st.session_state.update(uploaded_files=uploaded_files))
send_button = st.button("Send", on_click=send_message)
clear_button = st.button("Clear Conversation", on_click=clear_conversation)
 
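One note on the new uploader wiring: the `on_change` lambda relies on the module-level `uploaded_files` name, which can be hard to reason about across Streamlit reruns. A more explicit alternative, shown only as an illustration and not what the commit does, is to store the uploader's return value in session state directly:

```python
import streamlit as st

uploaded = st.file_uploader("Upload images:", type=["png", "jpg", "jpeg"],
                            accept_multiple_files=True, key="file_uploader_key")
if uploaded:
    st.session_state.uploaded_files = uploaded  # keep the current uploads for send_message()
```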