Spaces:
Build error
Build error
lorocksUMD
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -29,7 +29,7 @@ import re
|
|
29 |
"""
|
30 |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
31 |
"""
|
32 |
-
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
33 |
|
34 |
# Functions for inference
|
35 |
def image_parser(args):
|
@@ -53,8 +53,10 @@ def load_images(image_files):
|
|
53 |
out.append(image)
|
54 |
return out
|
55 |
|
56 |
-
model_path = "liuhaotian/llava-v1.6-mistral-7b"
|
|
|
57 |
model_name = get_model_name_from_path(model_path)
|
|
|
58 |
# tokenizer = AutoTokenizer.from_pretrained(model_path)
|
59 |
# model = LlavaMistralForCausalLM.from_pretrained(
|
60 |
# model_path,
|
@@ -166,30 +168,31 @@ def respond(
|
|
166 |
temperature,
|
167 |
top_p,
|
168 |
):
|
169 |
-
messages = [{"role": "system", "content": system_message}]
|
170 |
-
|
171 |
-
for val in history:
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
messages.append({"role": "user", "content": message})
|
178 |
-
|
179 |
-
response = ""
|
180 |
-
|
181 |
-
for message in client.chat_completion(
|
182 |
-
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
):
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
|
|
193 |
|
194 |
"""
|
195 |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|
|
|
29 |
"""
|
30 |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
31 |
"""
|
32 |
+
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
33 |
|
34 |
# Functions for inference
|
35 |
def image_parser(args):
|
|
|
53 |
out.append(image)
|
54 |
return out
|
55 |
|
56 |
+
# model_path = "liuhaotian/llava-v1.6-mistral-7b"
|
57 |
+
mode_path = "model_weights/"
|
58 |
model_name = get_model_name_from_path(model_path)
|
59 |
+
|
60 |
# tokenizer = AutoTokenizer.from_pretrained(model_path)
|
61 |
# model = LlavaMistralForCausalLM.from_pretrained(
|
62 |
# model_path,
|
|
|
168 |
temperature,
|
169 |
top_p,
|
170 |
):
|
171 |
+
# messages = [{"role": "system", "content": system_message}]
|
172 |
+
|
173 |
+
# for val in history:
|
174 |
+
# if val[0]:
|
175 |
+
# messages.append({"role": "user", "content": val[0]})
|
176 |
+
# if val[1]:
|
177 |
+
# messages.append({"role": "assistant", "content": val[1]})
|
178 |
+
|
179 |
+
# messages.append({"role": "user", "content": message})
|
180 |
+
|
181 |
+
# response = ""
|
182 |
+
|
183 |
+
# for message in client.chat_completion(
|
184 |
+
# messages,
|
185 |
+
# max_tokens=max_tokens,
|
186 |
+
# stream=True,
|
187 |
+
# temperature=temperature,
|
188 |
+
# top_p=top_p,
|
189 |
+
# ):
|
190 |
+
# token = message.choices[0].delta.content
|
191 |
+
|
192 |
+
# response += token
|
193 |
+
# yield response
|
194 |
+
yield "ungaa bungaa"
|
195 |
+
|
196 |
|
197 |
"""
|
198 |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|