add zephyr-7b
streamlit_app.py  CHANGED  (+8 -8)
@@ -84,14 +84,14 @@ def init_qa(model, api_key=None):
                           frequency_penalty=0.1)
         embeddings = OpenAIEmbeddings()
 
-
     elif model == 'mistral-7b-instruct-v0.1':
         chat = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.1",
                               model_kwargs={"temperature": 0.01, "max_length": 4096, "max_new_tokens": 2048})
         embeddings = HuggingFaceEmbeddings(
             model_name="all-MiniLM-L6-v2")
-
-
+
+    elif model == 'zephyr-7b-beta':
+        chat = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta",
                               model_kwargs={"temperature": 0.01, "max_length": 4096, "max_new_tokens": 2048})
         embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
     else:
@@ -157,20 +157,20 @@ def play_old_messages():
     with st.sidebar:
         st.session_state['model'] = model = st.radio(
             "Model",
-            ("chatgpt-3.5-turbo", "mistral-7b-instruct-v0.1"
+            ("chatgpt-3.5-turbo", "mistral-7b-instruct-v0.1", "zephyr-7b-beta"),
             index=1,
             captions=[
                 "ChatGPT 3.5 Turbo + Ada-002-text (embeddings)",
-                "Mistral-7B-Instruct-V0.1 + Sentence BERT (embeddings) :free:"
-
+                "Mistral-7B-Instruct-V0.1 + Sentence BERT (embeddings) :free:",
+                "Zephyr-7B-beta + Sentence BERT (embeddings) :free:"
             ],
             help="Select the LLM model and embeddings you want to use.",
             disabled=st.session_state['doc_id'] is not None or st.session_state['uploaded'])
 
         st.markdown(
-            ":warning: Mistral
+            ":warning: Mistral and Zephyr are free to use, however requests might hit limits of the huggingface free API and fail. :warning: ")
 
-        if model == 'mistral-7b-instruct-v0.1' and model not in st.session_state['api_keys']:
+        if (model == 'mistral-7b-instruct-v0.1' or model == 'zephyr-7b-beta') and model not in st.session_state['api_keys']:
             if 'HUGGINGFACEHUB_API_TOKEN' not in os.environ:
                 api_key = st.text_input('Huggingface API Key', type="password")
 
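The second hunk wires the new model into the sidebar: a third option in st.radio, a matching caption, a broader warning, and an extended condition so the Hugging Face API key prompt also covers Zephyr. A minimal standalone sketch of that sidebar logic follows; the session-state bootstrapping at the top is hypothetical, added only so the snippet runs outside the full app, and st.radio's captions parameter assumes Streamlit 1.26 or newer.

import os

import streamlit as st

# Hypothetical defaults so the sketch runs outside the full app.
for key, default in (('doc_id', None), ('uploaded', False), ('api_keys', {})):
    if key not in st.session_state:
        st.session_state[key] = default

with st.sidebar:
    st.session_state['model'] = model = st.radio(
        "Model",
        ("chatgpt-3.5-turbo", "mistral-7b-instruct-v0.1", "zephyr-7b-beta"),
        index=1,
        captions=[
            "ChatGPT 3.5 Turbo + Ada-002-text (embeddings)",
            "Mistral-7B-Instruct-V0.1 + Sentence BERT (embeddings) :free:",
            "Zephyr-7B-beta + Sentence BERT (embeddings) :free:"
        ],
        help="Select the LLM model and embeddings you want to use.",
        disabled=st.session_state['doc_id'] is not None or st.session_state['uploaded'])

    st.markdown(
        ":warning: Mistral and Zephyr are free to use, however requests might hit limits "
        "of the huggingface free API and fail. :warning:")

    # Both Hugging Face-hosted models need the same token, so one condition covers them.
    if (model == 'mistral-7b-instruct-v0.1' or model == 'zephyr-7b-beta') and model not in st.session_state['api_keys']:
        if 'HUGGINGFACEHUB_API_TOKEN' not in os.environ:
            api_key = st.text_input('Huggingface API Key', type="password")

Run with, for example, streamlit run sketch.py: Mistral is preselected (index=1), and the password field only appears when the chosen model is one of the two free ones, no key is stored for it, and HUGGINGFACEHUB_API_TOKEN is not set in the environment.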