github-actions committed · commit 32503a2
1 parent: b086c48

Sync updates from source repository
Files changed:
- README.md        +8   -7
- Vectara-logo.png +0   -0
- app.py           +100 -62
- query.py         +1   -1
- requirements.txt +1   -1
README.md CHANGED

@@ -1,13 +1,14 @@
 ---
 title: Ask Anabel
-emoji:
-colorFrom:
-colorTo:
-sdk:
-sdk_version:
+emoji: π
+colorFrom: indigo
+colorTo: green
+sdk: streamlit
+sdk_version: 1.32.2
 app_file: app.py
-license: apache-2.0
 pinned: false
-
+license: apache-2.0
+short_description: Ask SEL questions based on the works of Anabel Jensen
 ---
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Vectara-logo.png ADDED (binary image file)
app.py CHANGED

@@ -1,79 +1,117 @@
 from omegaconf import OmegaConf
 from query import VectaraQuery
 import os
-
+
+import streamlit as st
+from streamlit_pills import pills
+
+from PIL import Image
+
+max_examples = 6
 
 def isTrue(x) -> bool:
     if isinstance(x, bool):
         return x
     return x.strip().lower() == 'true'
 
-
-
-
-
-
-
-
-
-
-
+def launch_bot():
+    def generate_response(question):
+        response = vq.submit_query(question)
+        return response
+
+    def generate_streaming_response(question):
+        response = vq.submit_query_streaming(question)
+        return response
+
+    def show_example_questions():
+        if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
+            selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
+            if selected_example:
+                st.session_state.ex_prompt = selected_example
+                st.session_state.first_turn = False
+                return True
+        return False
 
-
-
-
-
-
-
-
+    if 'cfg' not in st.session_state:
+        corpus_keys = str(os.environ['corpus_keys']).split(',')
+        cfg = OmegaConf.create({
+            'corpus_keys': corpus_keys,
+            'api_key': str(os.environ['api_key']),
+            'title': os.environ['title'],
+            'source_data_desc': os.environ['source_data_desc'],
+            'streaming': isTrue(os.environ.get('streaming', False)),
+            'prompt_name': os.environ.get('prompt_name', None),
+            'examples': os.environ.get('examples', None)
+        })
+        st.session_state.cfg = cfg
+        st.session_state.ex_prompt = None
+        st.session_state.first_turn = True
+        example_messages = [example.strip() for example in cfg.examples.split(",")]
+        st.session_state.example_messages = [em for em in example_messages if len(em)>0][:max_examples]
 
-
-        for output in stream:
-            outputs += output
-            yield outputs
-    else:
-        # Call non-stream response and return message output
-        response = vq.submit_query(message)
-        yield response
+        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
 
-
-
-
-        <td style="width: 80%; text-align: left; vertical-align: middle;"> <h1>Vectara AI Assistant: {cfg.title}</h1> </td>
-        <td style="width: 20%; text-align: right; vertical-align: middle;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
-      </tr>
-      <tr>
-        <td colspan="2" style="font-size: 16px;">This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a> to ask questions about {cfg.source_data_desc}.</td>
-      </tr>
-    </table>
-    '''
+    cfg = st.session_state.cfg
+    vq = st.session_state.vq
+    st.set_page_config(page_title=cfg.title, layout="wide")
 
-
-
-
-
-
-
-    }
-    td {
-        vertical-align: middle;
-        border: none;
-    }
-    img {
-        width: 75%;
-    }
-    h1 {
-        font-size: 2em; /* Adjust the size as needed */
-    }
-    """
+    # left side content
+    with st.sidebar:
+        image = Image.open('Vectara-logo.png')
+        st.image(image, width=175)
+        st.markdown(f"## About\n\n"
+                    f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
 
-
-
-
-
+        st.markdown("---")
+        st.markdown(
+            "## How this works?\n"
+            "This app was built with [Vectara](https://vectara.com).\n"
+            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
+            "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
+        )
+        st.markdown("---")
+
+
+    st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)
+
+    if "messages" not in st.session_state.keys():
+        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
+
+    # Display chat messages
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.write(message["content"])
 
-
-
+    example_container = st.empty()
+    with example_container:
+        if show_example_questions():
+            example_container.empty()
+            st.rerun()
 
+    # select prompt from example question or user provided input
+    if st.session_state.ex_prompt:
+        prompt = st.session_state.ex_prompt
+    else:
+        prompt = st.chat_input()
+    if prompt:
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.write(prompt)
+        st.session_state.ex_prompt = None
+
+    # Generate a new response if last message is not from assistant
+    if st.session_state.messages[-1]["role"] != "assistant":
+        with st.chat_message("assistant"):
+            if cfg.streaming:
+                stream = generate_streaming_response(prompt)
+                response = st.write_stream(stream)
+            else:
+                with st.spinner("Thinking..."):
+                    response = generate_response(prompt)
+                st.write(response)
+            message = {"role": "assistant", "content": response}
+            st.session_state.messages.append(message)
+            st.rerun()
+
 if __name__ == "__main__":
-
+    launch_bot()
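Two things are worth calling out in the new app.py. Configuration comes entirely from environment variables, folded into an OmegaConf object and cached in st.session_state so it survives Streamlit's rerun-on-every-interaction model. And the chat loop hands the generator from generate_streaming_response to st.write_stream, which renders chunks as they arrive and returns the accumulated text for the chat history. Below is a minimal runnable sketch of that streaming pattern; fake_streaming_response is a hypothetical stand-in for VectaraQuery.submit_query_streaming, so it needs no Vectara credentials.

# Minimal sketch of the streaming chat pattern used in app.py above.
# Run with: streamlit run sketch.py
import time
import streamlit as st

def fake_streaming_response(question):
    # Yield the answer piece by piece, the way submit_query_streaming does.
    for word in f"You asked: {question}".split():
        yield word + " "
        time.sleep(0.05)

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the chat history on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

prompt = st.chat_input()
if prompt:
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)
    with st.chat_message("assistant"):
        # st.write_stream renders chunks as they arrive and returns the
        # concatenated text, so it can be stored for the next rerun.
        response = st.write_stream(fake_streaming_response(prompt))
    st.session_state.messages.append({"role": "assistant", "content": response})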
query.py CHANGED

@@ -125,4 +125,4 @@ class VectaraQuery():
             chunks.append(chunk)
             yield chunk
 
-            return ''.join(chunks)
+        return ''.join(chunks)
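The query.py hunk is a whitespace-only change (the diff view strips leading indentation, so the indentation above is reconstructed): most plausibly it dedents return ''.join(chunks) out of the chunk loop. The placement matters because in a Python generator, return ends iteration immediately and carries its value out via StopIteration.value; inside the loop it would cut the stream off after the first chunk. A small sketch of the idiom, with stream_chunks as a hypothetical stand-in for the streaming method:

# Sketch of the yield-then-return generator idiom used by
# VectaraQuery's streaming method.
def stream_chunks(chunks):
    collected = []
    for chunk in chunks:
        collected.append(chunk)
        yield chunk
    # Dedented: runs once, after every chunk has been yielded. Placed
    # inside the loop, it would stop the generator after the first chunk.
    return ''.join(collected)

pieces = []
gen = stream_chunks(["Hello, ", "world", "!"])
try:
    while True:
        pieces.append(next(gen))
except StopIteration as stop:
    # A generator's return value surfaces as StopIteration.value;
    # st.write_stream uses the yielded chunks, not this value.
    print(stop.value)  # Hello, world!
print(pieces)          # ['Hello, ', 'world', '!']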
requirements.txt CHANGED

@@ -1,5 +1,5 @@
-gradio==3.40.0
 requests_to_curl==1.1.0
 toml==0.10.2
 omegaconf==2.3.0
 syrupy==4.0.8
+streamlit_pills==0.3.0
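With gradio dropped and streamlit_pills added, the dependency set matches the Streamlit SDK now declared in the README frontmatter (Spaces provide streamlit itself via sdk_version). To try the app outside Spaces, the environment variables app.py reads must be present; a sketch with placeholder values, none of them real credentials:

# Hypothetical local setup: populate the variables app.py expects before
# launching with `streamlit run app.py`. All values are placeholders.
import os

os.environ.setdefault("corpus_keys", "my-corpus-key")    # placeholder
os.environ.setdefault("api_key", "my-vectara-api-key")   # placeholder
os.environ.setdefault("title", "Ask Anabel")
os.environ.setdefault("source_data_desc", "the works of Anabel Jensen")
os.environ.setdefault("streaming", "True")
os.environ.setdefault("examples", "What is SEL?, How can teachers build empathy?")  # placeholder questions

Note that examples effectively has to be set: app.py calls cfg.examples.split(",") unconditionally, so leaving it unset would raise an AttributeError on the None default.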