Update app.py
app.py CHANGED
@@ -1,39 +1,22 @@
-import subprocess
 import os
-from
-
-
-
-
-
-#
-
-
-
-
-
-
-#
-
-
-
-
-
-
-
-
-# 🔹 Load the model
-print("🔥 Loading the model...")
-model = Llama(model_path=MODEL_PATH, n_ctx=4096, n_threads=8)
-print("✅ Model loaded successfully!")
-
-# 🔹 Define a function to interact with the model
-def chat_with_mistral(prompt):
-    response = model(prompt, max_tokens=512)
-    return response["choices"][0]["text"]
-
-# 🔹 Create a Gradio UI
-iface = gr.Interface(fn=chat_with_mistral, inputs="text", outputs="text", title="Mistral-7B Chatbot")
-
-# 🔹 Launch the app
-iface.launch()
+from ctransformers import AutoModelForCausalLM
+
+# Define the model repository and file
+model_repo = "TheBloke/OpenHermes-2-Mistral-7B-GGUF"
+model_file = "openhermes-2-mistral-7b.Q4_K_M.gguf"
+
+# Download the model using ctransformers
+print(f"Downloading {model_file} from {model_repo}...")
+model = AutoModelForCausalLM.from_pretrained(
+    model_repo,
+    model_file=model_file,
+    model_type="mistral",
+    # Uncomment the following line if you have a CUDA-capable GPU
+    # gpu_layers=50
+)
+print("Model downloaded and loaded successfully.")
+
+# Test the model with a simple prompt
+prompt = "AI is going to"
+response = model(prompt)
+print(f"Prompt: {prompt}\nResponse: {response}")
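Note: the new version calls model(prompt) with ctransformers' default generation settings. The call also accepts generation parameters and can stream tokens as they are produced; the following is a minimal sketch assuming the model object loaded above, with illustrative parameter values that are not part of this commit.

# Sketch: streamed, bounded generation with the ctransformers model above.
# max_new_tokens and temperature are illustrative values, not from the commit.
prompt = "AI is going to"
for piece in model(prompt, max_new_tokens=128, temperature=0.7, stream=True):
    print(piece, end="", flush=True)
print()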
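This commit also drops the Gradio UI that the old app.py served. If the Space should keep a web interface, the old gr.Interface pattern can be rebuilt on top of the ctransformers model; below is a minimal sketch under that assumption, where the chat_with_mistral wrapper name is carried over from the removed code and max_new_tokens=512 mirrors the old max_tokens=512 setting.

import gradio as gr

def chat_with_mistral(prompt):
    # Cap the completion length, mirroring the removed version's max_tokens=512.
    return model(prompt, max_new_tokens=512)

# Same one-box text UI the removed code used, pointed at the new backend.
iface = gr.Interface(fn=chat_with_mistral, inputs="text", outputs="text",
                     title="OpenHermes-2-Mistral-7B Chatbot")
iface.launch()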