Upload 6 files
Browse files- app.py +136 -0
- config.py +11 -0
- dev.ipynb +0 -0
- env +1 -0
- prompts.py +1 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from openai import OpenAI
|
3 |
+
from config import config
|
4 |
+
import requests
|
5 |
+
from prompts import SYSTEM_PROMPT
|
6 |
+
import time
|
7 |
+
|
8 |
+
|
9 |
+
def setup_page():
    """Configure the Streamlit page: title, icon, layout, and sidebar state."""
    page_settings = {
        "page_title": "Ollama Chatbot",
        "page_icon": "🤖",
        "layout": "wide",
        "initial_sidebar_state": "auto",
    }
    st.set_page_config(**page_settings)
|
17 |
+
|
18 |
+
|
19 |
+
def initialize_session_state():
    """Ensure the chat history list exists in Streamlit's session state."""
    # Only seed on the first run; reruns keep the existing history.
    # Key-style access is equivalent to attribute access on session_state.
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
|
23 |
+
|
24 |
+
|
25 |
+
def get_ollama_client():
    """Return an OpenAI-compatible client pointed at the configured Ollama server."""
    # Ollama serves an OpenAI-compatible API under /v1/; the api_key is
    # required by the client library but ignored by Ollama.
    return OpenAI(base_url=f"{config.OLLAMA_SERVER}/v1/", api_key="ollama")
|
30 |
+
|
31 |
+
|
32 |
+
def create_message(role, content):
    """Build a single chat message record.

    Args:
        role: One of "system", "user", or "assistant".
        content: The message text.

    Returns:
        A dict with "role" and "content" keys.
    """
    message = {
        "role": role,
        "content": content,
    }
    return message
|
35 |
+
|
36 |
+
|
37 |
+
def chat_completion(client, messages, model):
    """Request a (non-streaming) chat completion and return the reply text.

    Args:
        client: OpenAI-compatible client (here, pointed at an Ollama server).
        messages: List of {"role", "content"} dicts forming the conversation.
        model: Model identifier to pass to the server.

    Returns:
        The assistant message content of the first choice.
    """
    # The original timed the call into variables that were never read
    # (st_time/en_time/total_time); that dead code is removed.
    response = client.chat.completions.create(messages=messages, model=model)
    return response.choices[0].message.content
|
44 |
+
|
45 |
+
|
46 |
+
def get_models(ollama_server):
    """Fetch the names of the models available on an Ollama server.

    Args:
        ollama_server: Base URL of the server, e.g. "http://127.0.0.1:11434".

    Returns:
        A list of model name strings; an empty list on any request failure
        (the error is surfaced in the UI rather than raised).
    """
    url = f"{ollama_server}/api/tags"
    try:
        # Fix: requests.get has no default timeout, so an unreachable server
        # would hang the whole app indefinitely. Bound the wait instead.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return [item["model"] for item in response.json().get("models", [])]
    except requests.RequestException as e:
        st.error(f"Error fetching models: {e}")
        return []
|
56 |
+
|
57 |
+
|
58 |
+
def render_chat_messages():
    """Render the stored conversation (user/assistant turns) in the chat area."""
    chat_area = st.container()
    with chat_area:
        for entry in st.session_state.messages:
            role = entry["role"]
            # System messages are part of the prompt, not the visible chat,
            # so only user/assistant turns are rendered (same as the original
            # if/elif pair).
            if role in ("user", "assistant"):
                chat_area.chat_message(role).write(entry["content"])
|
67 |
+
|
68 |
+
|
69 |
+
def handle_user_input(client, model):
    """Read a chat prompt from the user, query the model, and display both turns."""
    prompt = st.chat_input("Say something")
    if not prompt:
        # No submission this run (chat_input returns a falsy value).
        return

    # Seed the conversation with the system prompt on the very first turn.
    if not st.session_state.messages:
        st.session_state.messages.insert(0, create_message("system", SYSTEM_PROMPT))

    st.session_state.messages.append(create_message("user", prompt))
    st.container().chat_message("user").write(prompt)

    try:
        reply = chat_completion(client, st.session_state.messages, model)
        st.session_state.messages.append(create_message("assistant", reply))
        st.container().chat_message("assistant").write(reply)
    except Exception as e:
        # Surface the failure in the UI; the user's message stays in history.
        st.error(f"Error generating response: {e}")
|
84 |
+
|
85 |
+
|
86 |
+
def clear_chat():
    """Drop every stored chat message, including the system prompt."""
    # Key-style access is equivalent to attribute access on session_state;
    # rebinding to a fresh list matches the original assignment.
    st.session_state["messages"] = []
|
89 |
+
|
90 |
+
|
91 |
+
def main():
    """Run the Streamlit chatbot app: sidebar settings, model choice, chat loop."""
    setup_page()
    initialize_session_state()

    st.title("Ollama Chatbot")
    st.sidebar.title("Settings")

    # Ollama Server configuration (editable; applied via "Update settings").
    ollama_server = st.sidebar.text_input("Ollama Server", value=config.OLLAMA_SERVER)

    # Fetch available models from the server currently shown in the sidebar.
    models = get_models(ollama_server)
    if not models:
        st.error("No models available. Please check the server connection.")
        return

    # Sidebar settings
    model = st.sidebar.selectbox("Select a Model", models)

    # NOTE(review): Streamlit re-executes this script on every interaction,
    # so this module-global rebinding only lasts for the current run; the
    # updated prompt persists via the system message stored in session_state.
    global SYSTEM_PROMPT
    update_prompt = st.sidebar.text_area("System Prompt", value=SYSTEM_PROMPT)
    update = st.sidebar.button("Update settings", use_container_width=True)

    if update:
        # Update configuration and system prompt.
        config.OLLAMA_SERVER = ollama_server
        SYSTEM_PROMPT = update_prompt
        if len(st.session_state.messages) == 0:
            st.session_state.messages.insert(0, create_message("system", SYSTEM_PROMPT))
        elif st.session_state.messages[0]["role"] == "system":
            st.session_state.messages[0]["content"] = SYSTEM_PROMPT

    # Fix: create the client AFTER the settings update above. The original
    # built it earlier from the stale config.OLLAMA_SERVER value, so a server
    # change only took effect on the following rerun.
    client = get_ollama_client()

    if st.sidebar.button("Clear Chat", use_container_width=True):
        clear_chat()

    # Render chat interface and handle user input.
    render_chat_messages()
    handle_user_input(client, model)
|
133 |
+
|
134 |
+
|
135 |
+
# Script entry point (the app is normally launched with `streamlit run app.py`).
if __name__ == "__main__":
    main()
|
config.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
2 |
+
|
3 |
+
|
4 |
+
class Settings(BaseSettings):
    """Application settings loaded from the environment / the `env` file."""

    # Base URL of the Ollama server, e.g. "http://127.0.0.1:11434".
    OLLAMA_SERVER: str

    # Values are read from the file literally named "env" (not the
    # conventional ".env") in UTF-8.
    model_config = SettingsConfigDict(env_file="env", env_file_encoding="utf-8")


# Instantiate the settings
config = Settings()
|
dev.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
OLLAMA_SERVER=http://127.0.0.1:11434
|
prompts.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
# Default system prompt; app.py rebinds this at runtime from the sidebar's
# "System Prompt" text area.
SYSTEM_PROMPT = "You are a helpful assistant."
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
openai
|
2 |
+
streamlit
|
3 |
+
pydantic
|
4 |
+
pydantic-settings
|
5 |
+
requests
|