Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, pipeline
 from threading import Thread
 
-model_id = "rasyosef/
+model_id = "rasyosef/Llama-3.2-180M-Amharic-Instruct"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id)
@@ -38,11 +38,7 @@ def generate(message, chat_history, max_new_tokens=256):
         kwargs={
             "text_inputs":history,
             "max_new_tokens":max_new_tokens,
-            "
-            "do_sample":True,
-            "top_k":4,
-            "top_p":0.8,
-            "repetition_penalty":1.25,
+            "repetition_penalty":1.1,
             "streamer":streamer
         }
     )
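For context, after this change the generation path would look roughly like the sketch below. This is a minimal reconstruction assuming the common pipeline + TextIteratorStreamer streaming pattern; anything not visible in the diff (the pipe object, the chat_history message format, and the streaming loop) is an assumption, not the Space's exact code.

# Sketch of the streaming generate() flow this commit touches.
# Assumes a text-generation pipeline built from the model/tokenizer above;
# names not shown in the diff (e.g. `pipe`, the history format) are assumptions.
from threading import Thread

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
    pipeline,
)

model_id = "rasyosef/Llama-3.2-180M-Amharic-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def generate(message, chat_history, max_new_tokens=256):
    # Assumes chat_history is a list of {"role": ..., "content": ...} messages.
    history = list(chat_history) + [{"role": "user", "content": message}]

    # The streamer yields decoded text chunks as the model generates them.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )

    # Run the pipeline in a background thread with the updated kwargs:
    # the sampling options were dropped and repetition_penalty lowered to 1.1.
    thread = Thread(
        target=pipe,
        kwargs={
            "text_inputs": history,
            "max_new_tokens": max_new_tokens,
            "repetition_penalty": 1.1,
            "streamer": streamer,
        },
    )
    thread.start()

    # Yield the growing response so a chat UI can render partial output.
    response = ""
    for token in streamer:
        response += token
        yield response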