File size: 4,060 Bytes
3290259
 
 
 
4687612
3290259
 
 
 
 
 
 
 
 
 
4687612
 
 
 
 
 
 
 
3290259
054efdd
3290259
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4687612
 
 
3290259
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4687612
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import os
import uuid
from io import BytesIO

import google.generativeai as genai
import streamlit as st
from PIL import Image

# Numbered system prompts: each digit the user types selects one persona.
_PROMPT_TEXTS = (
    "AI Planner System Prompt: Your task is to assist in the development of a book.",
    "AI Companion System Prompt: Your role is to offer companionship and conversation.",
    "AI Tutor System Prompt: You're here to provide educational assistance.",
    "AI Advisor System Prompt: Provide guidance on financial planning.",
    "AI Fitness Coach Prompt: Offer fitness and health advice.",
)
system_prompts = {str(i): text for i, text in enumerate(_PROMPT_TEXTS, start=1)}

# Placeholder artwork shown when the matching prompt digit is entered.
# NOTE: images are opened eagerly at import time, so a missing file under
# data/ fails the whole app at startup.
placeholder_images = {
    str(index): Image.open(f'data/image{index}.jpg')
    for index in range(1, 6)
}

# Set your API key.
# SECURITY: a live API key used to be hard-coded on this line. Any key that
# has been committed to source control must be treated as leaked — revoke and
# rotate it. The credential is now read from the environment instead of
# living in the source.
api_key = os.environ.get("GOOGLE_API_KEY", "")
if not api_key:
    # Fail loudly in the UI rather than sending unauthenticated requests.
    st.error("GOOGLE_API_KEY is not set; the chatbot cannot reach the Gemini API.")

genai.configure(api_key=api_key)

# Configure the generative AI model.
# temperature=0.9 favours varied/creative replies; max_output_tokens caps a
# single reply at 3000 tokens.
generation_config = genai.GenerationConfig(
    temperature=0.9,
    max_output_tokens=3000
)

# Safety settings configuration: every harm category is set to BLOCK_NONE,
# i.e. the API-side content filters are fully disabled.
_HARM_CATEGORIES = (
    "HARM_CATEGORY_DANGEROUS_CONTENT",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_HARASSMENT",
)
safety_settings = [
    {"category": category, "threshold": "BLOCK_NONE"}
    for category in _HARM_CATEGORIES
]

# Seed session state on the first run: the conversation log and the raw
# digit sequence the user typed. Existing values survive reruns untouched.
for _key, _default in (('chat_history', []), ('input_sequence', '')):
    if _key not in st.session_state:
        st.session_state[_key] = _default

st.title("Gemini Chatbot")

# Display chat history
def display_chat_history():
    """Render every stored chat entry as a 'Role: text' markdown line."""
    for record in st.session_state['chat_history']:
        role = record['role'].title()
        text = record['parts'][0]['text']
        st.markdown(f"{role}: {text}")

# Function to clear conversation history
def clear_conversation():
    """Reset the stored conversation to an empty list (button callback)."""
    st.session_state.chat_history = []

# Function to process the input sequence and generate responses
def process_input_sequence():
    """For each digit typed, show its placeholder image and fire a request.

    Unknown digits fall back to a generic "Default system prompt" and show
    no image.
    """
    for key in st.session_state.input_sequence:
        prompt = system_prompts.get(key, "Default system prompt")
        image = placeholder_images.get(key)
        if image is not None:
            st.image(image, caption=f"Image for Prompt {key}")
        send_message(prompt)

# Send message function with system prompt
def send_message(system_prompt):
    """Send the user's current input to Gemini, prefixed by *system_prompt*.

    Reads ``st.session_state.user_input``; on success appends the model reply
    to ``st.session_state['chat_history']``, clears the input widget, and
    re-renders the history.
    """
    user_input = st.session_state.user_input
    if user_input:
        # Use the system prompt based on the input sequence digit
        # NOTE(review): chat_history only ever receives "model" entries (see
        # below), so this collects past model replies but never past user
        # turns — confirm whether user messages were meant to be persisted.
        prompts = [entry['parts'][0]['text'] for entry in st.session_state['chat_history']]
        prompts.append(user_input)
        
        # Combine the system prompt with the chat history
        chat_history_str = system_prompt + "\n" + "\n".join(prompts)

        model = genai.GenerativeModel(
            model_name='gemini-pro',
            generation_config=generation_config,
            safety_settings=safety_settings
        )

        # The whole conversation is flattened into one "user" turn rather than
        # sent as a structured multi-turn history.
        response = model.generate_content([{"role": "user", "parts": [{"text": chat_history_str}]}])
        response_text = response.text if hasattr(response, "text") else "No response text found."

        if response_text:
            st.session_state['chat_history'].append({"role": "model", "parts":[{"text": response_text}]})

        # Clearing the widget value here also means that when
        # process_input_sequence calls this again for later digits in the same
        # sequence, user_input is re-read as '' and those calls only re-render.
        st.session_state.user_input = ''

    display_chat_history()

# User input text area (keyed so callbacks can read/clear it via
# st.session_state.user_input).
user_input = st.text_area("Enter your message here:", value="", key="user_input")

# Input sequence for system prompts: each digit selects one system prompt
# (and its placeholder image) in order.
input_sequence = st.text_input("Enter your input sequence (e.g., 12345):", value="", key="input_sequence")

# Send message button — on_click runs as a callback before the script reruns.
send_button = st.button("Send", on_click=process_input_sequence)

# Clear conversation button
clear_button = st.button("Clear Conversation", on_click=clear_conversation)