Runtime error
Update app.py
app.py
CHANGED
@@ -3,6 +3,7 @@ from huggingface_hub import InferenceClient
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
 
 client = InferenceClient("meta-llama/Meta-Llama-3-8B")
 
@@ -38,15 +39,6 @@ def respond(
 
         response += token
         yield response
-"""
-model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
-
-pipeline = transformers.pipeline(
-    "text-generation",
-    model=model_id,
-    model_kwargs={"torch_dtype": torch.bfloat16},
-    device="cuda",
-)
 
 def chat_function(message, history, system_prompt, max_new_tokens, temperature):
     messages = [{"role":"system","content":system_prompt},
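The first hunk closes the module-level docstring immediately after the documentation line; in the old file the only other `"""` was the one at old line 41, so the InferenceClient setup and the body of respond() sat inside that string. The second hunk deletes that stray closing `"""` together with the leftover transformers.pipeline block, which referenced transformers and torch without importing them, leaving the Space to rely on the hosted Inference API instead of an in-process model.

For reference, here is a minimal sketch of what the full respond() generator presumably looks like. Only the `response += token` / `yield response` tail and the InferenceClient("meta-llama/Meta-Llama-3-8B") call are confirmed by this diff; the rest follows the stock Gradio ChatInterface template, so parameter names such as max_tokens and top_p are illustrative assumptions.

# Sketch based on the standard Gradio ChatInterface template for InferenceClient.
# Only the last two lines of the loop body are confirmed by the diff above.
from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Meta-Llama-3-8B")

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build the conversation in OpenAI-style message format.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream completion chunks and yield the partial response so the UI updates live.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response

Note that chat_function(), whose first lines appear as unchanged context at the end of the second hunk, still takes a system_prompt and builds its own messages list; the commit does not touch it.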