import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, pipeline
from threading import Thread
model_id = "rasyosef/Llama-3.2-180M-Amharic-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
llama_am = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    pad_token_id=tokenizer.pad_token_id,
    eos_token_id=tokenizer.eos_token_id
)
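# Optional sanity check of the pipeline outside Gradio, left commented out so it
# does not run at startup. The list-of-messages (chat) input mirrors what
# `generate` below passes; the prompt text and token count are illustrative only.
#   sample = llama_am(
#       [{"role": "user", "content": "ሰላም"}],
#       max_new_tokens=32,
#       repetition_penalty=1.15
#   )
#   print(sample[0]["generated_text"][-1]["content"])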
# Function that accepts a prompt and chat history and streams generated text from the llama_am pipeline
def generate(message, chat_history, max_new_tokens=64):
    history = []

    for sent, received in chat_history:
        history.append({"role": "user", "content": sent})
        history.append({"role": "assistant", "content": received})

    history.append({"role": "user", "content": message})
    #print(history)

    if len(tokenizer.apply_chat_template(history)) > 512:
        yield "chat history is too long"
    else:
        # Streamer
        streamer = TextIteratorStreamer(tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=300.0)
        thread = Thread(target=llama_am,
            kwargs={
                "text_inputs": history,
                "max_new_tokens": max_new_tokens,
                "repetition_penalty": 1.15,
                "streamer": streamer
            }
        )
        thread.start()

        generated_text = ""
        for word in streamer:
            generated_text += word
            response = generated_text.strip()
            yield response
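# `generate` is a Python generator: it yields a progressively longer response
# string, which is what gr.ChatInterface consumes to stream text to the UI.
# A rough standalone usage sketch (assumed, not part of the original app):
#   for partial in generate("ሰላም", chat_history=[], max_new_tokens=32):
#       print(partial)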
# Chat interface with gradio
with gr.Blocks() as demo:
    gr.Markdown("""
# Llama 3.2 180M Amharic Chatbot Demo
This chatbot was created using [Llama-3.2-180M-Amharic-Instruct](https://huggingface.co/rasyosef/Llama-3.2-180M-Amharic-Instruct), a finetuned version of my 180 million parameter [Llama 3.2 180M Amharic](https://huggingface.co/rasyosef/Llama-3.2-180M-Amharic) transformer model.
""")

    tokens_slider = gr.Slider(8, 256, value=64, label="Maximum new tokens", info="A larger `max_new_tokens` parameter value gives you longer text responses but at the cost of a slower response time.")

    chatbot = gr.ChatInterface(
        chatbot=gr.Chatbot(height=400),
        fn=generate,
        additional_inputs=[tokens_slider],
        stop_btn=None,
        cache_examples=False,
        examples=[
["แฐแแแฃ แฅแแดแต แแ
?"],
["แจแขแตแฎแตแซ แแ แจแฐแ แตแ แแแตแ แแ?"],
["แจแขแตแฎแตแซ แจแแจแจแปแ แแแต แแ แแ แฉ?"],
["แจแ แแญแ แแฅแ แแแแ"],
["แฐแจแต แแแจแ\n\nแ
แฅแ แ แแ แณ"],
["แ แแต แ แตแแ แแแต แแแจแ"],
["แจแฐแฐแ แ แฝแแ แ แตแฐแซแจแต แแ แ แญแแต แแ? 'แ แแแณแ'แฃ 'แ แแณแ' แแญแ 'แแแแฐแ' แจแแ แแแฝ แตแฅแข 'แ แชแ แแแ แแ แญ'"],
["แจแแจแแณแญ แแ แจแฐแ แตแ แแแตแ แแ?"],
["แ แแ แจแ แแชแซ แแฌแแณแแต แแ แแ?"],
["แถแตแต แจแ แแชแซ แแแซแต แฅแแตแแ"],
["3 แจแ แแชแซ แแชแแฝแ แตแ แฅแแต"],
["5 แจแ แแชแซ แจแฐแแแฝแ แฅแแต"],
["แ แแตแต แจแ แแฎแ แแแฎแฝแ แฅแแตแแ"],
["แ แแแ แแญ แซแแตแ 7 แ แ
แแซแต แแแจแ"]
        ]
    )
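    # Each entry in `additional_inputs` is appended to the arguments gr.ChatInterface
    # passes to `generate`, so the slider value arrives as `max_new_tokens`.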
demo.queue().launch(debug=True)
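# `debug=True` keeps launch() blocking and prints worker errors to the logs. When
# running locally rather than on Spaces, `share=True` could also be passed to
# launch() for a temporary public link (standard Gradio option, not something the
# original app sets).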