Update app.py
app.py CHANGED
@@ -1,44 +1,29 @@
 import os
 import gradio as gr
+import subprocess
 from llama_cpp import Llama
-import requests
 
-#
-
-MODEL_PATH =
-MODEL_URL = "https://huggingface.co/TheBloke/Phi-2-GGUF/resolve/main/phi-2.Q4_K_M.gguf"
+# Model download link (Modify if needed)
+MODEL_URL = "https://huggingface.co/TheBloke/Mistral-7B-GGUF/resolve/main/mistral-7b.Q4_K_M.gguf"
+MODEL_PATH = "./models/mistral-7b.Q4_K_M.gguf"
 
-#
-os.makedirs(
+# Create models directory if not exists
+os.makedirs("./models", exist_ok=True)
 
-#
+# Auto-download model if not present
 if not os.path.exists(MODEL_PATH):
-    print("
-
-    with open(MODEL_PATH, "wb") as f:
-        for chunk in response.iter_content(chunk_size=8192):
-            f.write(chunk)
-    print("Download complete!")
+    print("Downloading Mistral-7B Q4 GGUF model...")
+    subprocess.run(["wget", MODEL_URL, "-O", MODEL_PATH], check=True)
 
-# Load
+# Load GGUF model
 print("Loading model...")
-model = Llama(model_path=MODEL_PATH)
-print("Model loaded successfully!")
+model = Llama(model_path=MODEL_PATH, n_ctx=4096, n_threads=8)
 
-# Define function for
-def
-
-    return
+# Define function for chat
+def chat_with_ai(prompt):
+    response = model(prompt, max_tokens=512, stop=["</s>"])
+    return response["choices"][0]["text"]
 
-#
-iface = gr.Interface(
-
-    inputs=gr.Textbox(lines=2, placeholder="Ask your question here..."),
-    outputs="text",
-    title="Phi-2 AI Assistant",
-    description="Ask anything and get detailed responses!",
-)
-
-# Launch the interface
-if __name__ == "__main__":
-    iface.launch()
+# Gradio UI
+iface = gr.Interface(fn=chat_with_ai, inputs="text", outputs="text", title="Mistral-7B GGUF Chatbot")
+iface.launch()
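As a quick sanity check of the inference path, the chat_with_ai helper can be called directly (for example from a Python shell on the Space) before involving the Gradio UI; the llama_cpp call returns a completion dict, and the generated text sits under choices[0]["text"], which is what the function unwraps:

# Hypothetical usage, assuming app.py above has already downloaded and loaded the model:
reply = chat_with_ai("Summarize what a GGUF model file is in one sentence.")
print(reply)

# Equivalent raw call, showing the structure chat_with_ai unwraps:
raw = model("Hello", max_tokens=16, stop=["</s>"])
print(raw["choices"][0]["text"])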