Update app.py
app.py CHANGED
@@ -4,6 +4,19 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, pipeline
 from threading import Thread
 
+model_id = "rasyosef/llama-3.2-amharic-64k-instruct-beta"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
+llama_am = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    pad_token_id=tokenizer.pad_token_id,
+    eos_token_id=tokenizer.eos_token_id
+)
+
 # Function that accepts a prompt and generates text using the phi2 pipeline
 def generate(message, chat_history, max_new_tokens=256):
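The hunk cuts off at the `generate` signature, so the function body is not part of this commit view (note also that the context comment above it still refers to the earlier phi-2 pipeline). Since the file imports `TextIteratorStreamer` and `Thread`, the function presumably runs generation in a background thread and streams decoded tokens to the UI. The sketch below is one plausible shape for it, not the code from this commit: the chat-template prompt, the `(user, assistant)` tuple history format, and the generation parameters are all assumptions. It relies on the module-level `tokenizer` and `llama_am` defined in the diff above.

# Hypothetical sketch of the truncated generate() function, assuming
# app.py uses TextIteratorStreamer + Thread (both imported at the top
# of the file) to stream tokens into a Gradio ChatInterface.
def generate(message, chat_history, max_new_tokens=256):
    # Rebuild the conversation in chat format (assumes Gradio's
    # (user, assistant) tuple-style history).
    messages = []
    for user_msg, bot_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    # Stream decoded text as it is produced instead of waiting for the
    # full completion; skip the prompt and special tokens in the output.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    thread = Thread(
        target=llama_am,
        kwargs={
            "text_inputs": prompt,
            "max_new_tokens": max_new_tokens,
            "streamer": streamer,
        },
    )
    thread.start()

    # Yield the growing response so the chat UI updates token by token.
    response = ""
    for new_text in streamer:
        response += new_text
        yield response

One grounded note on the commit itself: passing `pad_token_id` and `eos_token_id` explicitly when constructing the pipeline, as this change does, suppresses the usual "Setting pad_token_id to eos_token_id" warning from transformers and ensures generation stops cleanly at the end-of-sequence token.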