Update app.py
app.py CHANGED
@@ -28,7 +28,7 @@ def respond(
     from llama_cpp_agent.chat_history.messages import Roles
     print(model)
     llm = Llama(
-        model_path="models/
+        model_path=f"models/{model}",
         n_gpu_layers=81,
     )
     provider = LlamaCppPythonProvider(llm)

@@ -82,7 +82,7 @@ demo = gr.ChatInterface(
             step=0.05,
             label="Top-p (nucleus sampling)",
         ),
-        gr.Dropdown(['Meta-Llama-3-70B-Instruct-Q3_K_M.gguf', 'Smaug-Llama-3-70B-Instruct-Q3_K_M.gguf'], label="Model"),
+        gr.Dropdown(['Meta-Llama-3-70B-Instruct-Q3_K_M.gguf', 'Smaug-Llama-3-70B-Instruct-Q3_K_M.gguf'], value="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf", label="Model"),
     ],
     theme=gr.themes.Soft(primary_hue="green", secondary_hue="indigo", neutral_hue="zinc", font=[gr.themes.GoogleFont("Exo 2"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
         block_background_fill_dark="*neutral_800"
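For context (this note is not part of the commit): a minimal sketch of how the two hunks fit together, assuming the usual gr.ChatInterface wiring, where the current value of each component in additional_inputs is passed to the callback as an extra positional argument after message and history. The Dropdown's selected filename therefore arrives as respond's model parameter and is interpolated into model_path. The short signature and the create_chat_completion call below are simplifications; the Space's real respond takes more sampling controls and streams through llama_cpp_agent.

import gradio as gr
from llama_cpp import Llama

def respond(message, history, top_p, model):
    # `model` is the filename chosen in the Dropdown below.
    llm = Llama(
        model_path=f"models/{model}",  # first hunk: the selection replaces a hardcoded path
        n_gpu_layers=81,               # offload up to 81 layers to the GPU
    )
    out = llm.create_chat_completion(
        messages=[{"role": "user", "content": message}],
        top_p=top_p,
    )
    return out["choices"][0]["message"]["content"]

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(0.0, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        gr.Dropdown(
            ['Meta-Llama-3-70B-Instruct-Q3_K_M.gguf',
             'Smaug-Llama-3-70B-Instruct-Q3_K_M.gguf'],
            # Second hunk: without value=, the Dropdown starts as None and the
            # f-string would build "models/None" on the first submit.
            value="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
            label="Model",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()

One design note: constructing a fresh Llama per call reloads a 70B GGUF on every message, so caching the loaded instance per selected model would be the natural follow-up.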