Update app.py

app.py CHANGED

@@ -10,15 +10,6 @@ import ollama
 # List of available models for selection.
 # IMPORTANT: These names must correspond to models that have been either
 
-ollama pull hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-#ollama pull hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ollama pull smollm2:360m-instruct-q5_K_M
-ollama pull hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M
-#ollama pull gemma3n:e2b-it-q4_K_M #slow on Spaces CPU
-ollama pull granite3.3:2b
-ollama pull hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M
-
-
 # Model from run.sh
 AVAILABLE_MODELS = [
     'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
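The lines removed here recorded the ollama pull command for each entry in AVAILABLE_MODELS; per the surviving "# Model from run.sh" comment, the actual pulls are expected to happen in run.sh before app.py starts. For reference, a minimal sketch of performing the same pulls from Python with the ollama client follows; the pull_models helper and the example call are illustrative assumptions, not code from app.py.

import ollama

def pull_models(models: list[str]) -> None:
    """Download each model up front so the first chat request does not stall on a pull.

    Hypothetical helper; on this Space the pulls appear to be done by run.sh instead.
    """
    for name in models:
        print(f"Pulling {name} ...")
        ollama.pull(name)  # quick if the model is already in the local store

# Example: prefetch two of the models from AVAILABLE_MODELS.
pull_models(['smollm2:360m-instruct-q5_K_M', 'granite3.3:2b'])
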
@@ -30,10 +21,6 @@ AVAILABLE_MODELS = [
     'hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M'
 ]
 
-#---fail to run
-#'hf.co/ggml-org/SmolLM3-3B-GGUF:Q4_K_M',
-#'hf.co/bartowski/nvidia_OpenReasoning-Nemotron-1.5B-GGUF:Q5_K_M',
-
 
 # Default System Prompt
 DEFAULT_SYSTEM_PROMPT = """
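The IMPORTANT comment near the top of app.py ties AVAILABLE_MODELS to the models that have actually been pulled into the local Ollama store; a name with no matching local tag will make chat requests against it fail. A small startup check along those lines is sketched below; check_models is a hypothetical helper and assumes a recent ollama Python client whose list() entries expose the tag via the .model attribute (older clients return dicts keyed by 'name').

import ollama

def check_models(available: list[str]) -> list[str]:
    """Return the entries of `available` that are missing from the local Ollama store."""
    # Assumption: recent ollama client, where list() returns entries with a .model tag.
    installed = {m.model for m in ollama.list().models}
    return [name for name in available if name not in installed]

# Example: warn about any configured model that has not been pulled yet.
for name in check_models(['granite3.3:2b', 'smollm2:360m-instruct-q5_K_M']):
    print(f"WARNING: {name} is listed in AVAILABLE_MODELS but has not been pulled")
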
@@ -92,7 +79,8 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutra
 SYSTEM_PROMPT_OPTIONS = {
     "Smart & Accurate (Default)": DEFAULT_SYSTEM_PROMPT,
     "Friendly & Conversational": """Respond in a warm, friendly, and engaging tone. Use natural language and offer helpful suggestions. Keep responses concise but personable.""",
-    "Professional & Formal": """Maintain a formal and professional tone. Use precise language, avoid slang, and ensure responses are suitable for business or academic contexts."""
+    "Professional & Formal": """Maintain a formal and professional tone. Use precise language, avoid slang, and ensure responses are suitable for business or academic contexts.""",
+    "Elon Musk style": "You must chat in Elon Musk style!"
 }
 
 system_prompt_selector = gr.Radio(
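The new "Elon Musk style" entry only extends the SYSTEM_PROMPT_OPTIONS dict; the gr.Radio defined just below it presumably builds its choices from the dict's keys, so no further change should be needed for the option to appear. A minimal sketch of how such a radio selection can be threaded into an ollama.chat call is given below; the respond function, the granite3.3:2b model choice, and the shortened prompt strings are illustrative assumptions rather than code from app.py.

import gradio as gr
import ollama

# Mirrors SYSTEM_PROMPT_OPTIONS in app.py; prompt texts shortened here.
SYSTEM_PROMPT_OPTIONS = {
    "Smart & Accurate (Default)": "Answer accurately and concisely.",
    "Friendly & Conversational": "Respond in a warm, friendly, and engaging tone.",
    "Professional & Formal": "Maintain a formal and professional tone.",
    "Elon Musk style": "You must chat in Elon Musk style!",
}

def respond(message: str, style: str) -> str:
    """Send one message to a model, using the system prompt picked in the radio."""
    result = ollama.chat(
        model="granite3.3:2b",  # any entry of AVAILABLE_MODELS; fixed here for brevity
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT_OPTIONS[style]},
            {"role": "user", "content": message},
        ],
    )
    return result["message"]["content"]

with gr.Blocks() as demo:
    system_prompt_selector = gr.Radio(
        choices=list(SYSTEM_PROMPT_OPTIONS),
        value="Smart & Accurate (Default)",
        label="System prompt",
    )
    message_box = gr.Textbox(label="Message")
    reply_box = gr.Textbox(label="Reply")
    message_box.submit(respond, inputs=[message_box, system_prompt_selector], outputs=reply_box)

demo.launch()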