github-actions committed on
Commit 32503a2 · 1 Parent(s): b086c48

Sync updates from source repository

Files changed (5)
  1. README.md +8 -7
  2. Vectara-logo.png +0 -0
  3. app.py +100 -62
  4. query.py +1 -1
  5. requirements.txt +1 -1
README.md CHANGED
@@ -1,13 +1,14 @@
 ---
 title: Ask Anabel
-emoji: 🚀
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 4.38.1
+emoji: 📈
+colorFrom: indigo
+colorTo: green
+sdk: streamlit
+sdk_version: 1.32.2
 app_file: app.py
-license: apache-2.0
 pinned: false
-short_description: Ask Anabel about Social Emotional Learning
+license: apache-2.0
+short_description: Ask SEL questions based on the works of Anabel Jensen
 ---
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
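
A note on what this frontmatter change does: on Hugging Face Spaces, the `sdk` and `sdk_version` fields tell the platform which framework to provision for the Space, so flipping them from `gradio`/`4.38.1` to `streamlit`/`1.32.2` is what actually switches the runtime. The framework package itself does not need to be listed in `requirements.txt`, which is why the requirements diff further down drops `gradio` and adds only `streamlit_pills`.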
Vectara-logo.png ADDED
app.py CHANGED
@@ -1,79 +1,117 @@
 from omegaconf import OmegaConf
 from query import VectaraQuery
 import os
-import gradio as gr
+
+import streamlit as st
+from streamlit_pills import pills
+
+from PIL import Image
+
+max_examples = 6

 def isTrue(x) -> bool:
     if isinstance(x, bool):
         return x
     return x.strip().lower() == 'true'

-corpus_keys = str(os.environ['corpus_keys']).split(',')
-cfg = OmegaConf.create({
-    'corpus_keys': corpus_keys,
-    'api_key': str(os.environ['api_key']),
-    'title': os.environ['title'],
-    'source_data_desc': os.environ['source_data_desc'],
-    'streaming': isTrue(os.environ.get('streaming', False)),
-    'prompt_name': os.environ.get('prompt_name', None),
-    'examples': os.environ.get('examples', None)
-})
-
-vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
-
-def respond(message, history):
-    if cfg.streaming:
-        # Call stream response and stream output
-        stream = vq.submit_query_streaming(message)
-
-        outputs = ""
-        for output in stream:
-            outputs += output
-            yield outputs
-    else:
-        # Call non-stream response and return message output
-        response = vq.submit_query(message)
-        yield response
-
-heading_html = f'''
-    <table>
-      <tr>
-        <td style="width: 80%; text-align: left; vertical-align: middle;"> <h1>Vectara AI Assistant: {cfg.title}</h1> </td>
-        <td style="width: 20%; text-align: right; vertical-align: middle;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
-      </tr>
-      <tr>
-        <td colspan="2" style="font-size: 16px;">This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a> to ask questions about {cfg.source_data_desc}.</td>
-      </tr>
-    </table>
-'''
-
-bot_css = """
-table {
-    border: none;
-    width: 100%;
-    table-layout: fixed;
-    border-collapse: separate;
-}
-td {
-    vertical-align: middle;
-    border: none;
-}
-img {
-    width: 75%;
-}
-h1 {
-    font-size: 2em; /* Adjust the size as needed */
-}
-"""
-
-if cfg.examples:
-    app_examples = [example.strip() for example in cfg.examples.split(",")]
-else:
-    app_examples = None
-
-demo = gr.ChatInterface(respond, description = heading_html, css = bot_css,
-                        chatbot = gr.Chatbot(value = [[None, "How may I help you?"]]), examples = app_examples, cache_examples = False)
+def launch_bot():
+    def generate_response(question):
+        response = vq.submit_query(question)
+        return response
+
+    def generate_streaming_response(question):
+        response = vq.submit_query_streaming(question)
+        return response
+
+    def show_example_questions():
+        if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
+            selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
+            if selected_example:
+                st.session_state.ex_prompt = selected_example
+                st.session_state.first_turn = False
+                return True
+        return False
+
+    if 'cfg' not in st.session_state:
+        corpus_keys = str(os.environ['corpus_keys']).split(',')
+        cfg = OmegaConf.create({
+            'corpus_keys': corpus_keys,
+            'api_key': str(os.environ['api_key']),
+            'title': os.environ['title'],
+            'source_data_desc': os.environ['source_data_desc'],
+            'streaming': isTrue(os.environ.get('streaming', False)),
+            'prompt_name': os.environ.get('prompt_name', None),
+            'examples': os.environ.get('examples', None)
+        })
+        st.session_state.cfg = cfg
+        st.session_state.ex_prompt = None
+        st.session_state.first_turn = True
+        example_messages = [example.strip() for example in cfg.examples.split(",")]
+        st.session_state.example_messages = [em for em in example_messages if len(em) > 0][:max_examples]
+
+        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
+
+    cfg = st.session_state.cfg
+    vq = st.session_state.vq
+    st.set_page_config(page_title=cfg.title, layout="wide")
+
+    # left side content
+    with st.sidebar:
+        image = Image.open('Vectara-logo.png')
+        st.image(image, width=175)
+        st.markdown(f"## About\n\n"
+                    f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
+
+        st.markdown("---")
+        st.markdown(
+            "## How this works?\n"
+            "This app was built with [Vectara](https://vectara.com).\n"
+            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
+            "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
+        )
+        st.markdown("---")
+
+    st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)
+
+    if "messages" not in st.session_state.keys():
+        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
+
+    # Display chat messages
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.write(message["content"])
+
+    example_container = st.empty()
+    with example_container:
+        if show_example_questions():
+            example_container.empty()
+            st.rerun()
+
+    # select prompt from example question or user provided input
+    if st.session_state.ex_prompt:
+        prompt = st.session_state.ex_prompt
+    else:
+        prompt = st.chat_input()
+    if prompt:
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.write(prompt)
+        st.session_state.ex_prompt = None
+
+    # Generate a new response if last message is not from assistant
+    if st.session_state.messages[-1]["role"] != "assistant":
+        with st.chat_message("assistant"):
+            if cfg.streaming:
+                stream = generate_streaming_response(prompt)
+                response = st.write_stream(stream)
+            else:
+                with st.spinner("Thinking..."):
+                    response = generate_response(prompt)
+                    st.write(response)
+            message = {"role": "assistant", "content": response}
+            st.session_state.messages.append(message)
+            st.rerun()

 if __name__ == "__main__":
-    demo.launch()
+    launch_bot()
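
For readers new to Streamlit, the structure above follows from its execution model: the script reruns top to bottom on every interaction, so the rewrite parks the config and the `VectaraQuery` client in `st.session_state` to avoid rebuilding them, and hands generators to `st.write_stream` to render streaming answers incrementally. A minimal, self-contained sketch of that same pattern, with a hypothetical `fake_stream` standing in for `vq.submit_query_streaming`:

```python
import time
import streamlit as st

def fake_stream(prompt):
    # Hypothetical stand-in for vq.submit_query_streaming(prompt):
    # any generator of text chunks works with st.write_stream.
    for word in f"You asked: {prompt}".split():
        yield word + " "
        time.sleep(0.05)

# The script reruns on every interaction, so expensive objects
# (config, API clients) survive only if kept in st.session_state.
if "client" not in st.session_state:
    st.session_state.client = object()  # e.g. VectaraQuery(...)

if prompt := st.chat_input("Ask something"):
    with st.chat_message("user"):
        st.write(prompt)
    with st.chat_message("assistant"):
        # st.write_stream drains the generator into the UI chunk by
        # chunk and returns the concatenated text, as app.py relies on.
        response = st.write_stream(fake_stream(prompt))
```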
query.py CHANGED
@@ -125,4 +125,4 @@ class VectaraQuery():
                 chunks.append(chunk)
                 yield chunk

-    return ''.join(chunks)
+        return ''.join(chunks)
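
One subtlety in this hunk: `submit_query_streaming` is a generator, so its `return ''.join(chunks)` never reaches a plain `for` loop; Python stashes the value on `StopIteration.value`, where delegating callers can pick it up. A minimal sketch (names hypothetical) of how that return value is recovered with `yield from`:

```python
def stream_chunks():
    # Mirrors the shape of submit_query_streaming: yield pieces as
    # they arrive, then return the joined whole on StopIteration.value.
    chunks = []
    for chunk in ("Hello, ", "world!"):
        chunks.append(chunk)
        yield chunk
    return ''.join(chunks)

def relay():
    # `yield from` forwards each chunk and evaluates to the inner
    # generator's return value, which a bare `for` loop would discard.
    full_text = yield from stream_chunks()
    print(f"\nfull response: {full_text!r}")

for piece in relay():
    print(piece, end="")
```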
requirements.txt CHANGED
@@ -1,5 +1,5 @@
-gradio==3.40.0
 requests_to_curl==1.1.0
 toml==0.10.2
 omegaconf==2.3.0
 syrupy==4.0.8
+streamlit_pills==0.3.0