Commit ab37bbe
Parent(s): a1a2a18
fix: correct path for together AI model
prediction.py CHANGED (+8 -8)
@@ -25,14 +25,14 @@ from tqdm import tqdm
 import subprocess
 
 # https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/132
-subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
+# subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 
-pipeline = hf_pipeline(
-    "text-generation",
-    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
-    model_kwargs={"torch_dtype": 'bfloat16'},
-    device_map="auto",
-)
+# pipeline = hf_pipeline(
+#     "text-generation",
+#     model="meta-llama/Meta-Llama-3.1-8B-Instruct",
+#     model_kwargs={"torch_dtype": 'bfloat16'},
+#     device_map="auto",
+# )
 
 
 class ModelPrediction:
@@ -140,7 +140,7 @@ class ModelPrediction:
         elif "DeepSeek-R1-Distill-Llama-70B" in model_name:
            model_name = "together_ai/deepseek-ai/DeepSeek-R1-Distill-Llama-70B"
         elif "llama-8" in model_name:
-            model_name = "together_ai/meta-llama/Meta-Llama-3-8B-Instruct"
+            model_name = "together_ai/meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
         else:
            raise ValueError("Model forbidden")
 
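
For context on the first hunk: the disabled block built a local text-generation pipeline with transformers and cleared the ZeroGPU offload cache. A minimal sketch of the same pattern, assuming hf_pipeline is transformers.pipeline imported under an alias (the import sits outside this hunk, so the alias is an assumption):

from transformers import pipeline as hf_pipeline  # assumed alias; import not shown in the diff

# Sketch of the now-commented local-inference path, not the Space's live code.
generator = hf_pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    model_kwargs={"torch_dtype": "bfloat16"},  # half-precision weights to cut memory use
    device_map="auto",  # let accelerate place layers across available GPUs/CPU
)
print(generator("Hello,", max_new_tokens=16)[0]["generated_text"])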
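The second hunk is the fix named in the commit message: the 8B Llama branch now points at Meta-Llama-3.1-8B-Instruct-Turbo instead of the old Meta-Llama-3-8B-Instruct path. The "together_ai/" prefix matches litellm's provider-routing convention; assuming the Space dispatches completions through litellm (an assumption, the call site is not in this diff), the corrected name would be consumed like this:

import os
from litellm import completion  # assumes litellm resolves the together_ai/ prefix

os.environ["TOGETHERAI_API_KEY"] = "..."  # placeholder; use a real Together AI key

response = completion(
    model="together_ai/meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)

Keying the branch on a substring ("llama-8" in model_name) keeps the user-facing label decoupled from the provider path, which is why only this one mapping line had to change.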