3ed0k4 committed
Commit 5296ad6 · verified · 1 Parent(s): c70acf3

Create app.py

Files changed (1)
app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+ import gradio as gr
+
+ # 1) Load the Central Kurdish (Arabic) Goldfish model
+ MODEL_ID = "goldfish-models/ckb_arab_full"
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
+ model.eval()
+
+ # 2) Chat function: appends the new exchange to the history and clears the textbox
+ def chat_fn(user_message, history):
+     # Wrap the message in [CLS]/[SEP] only if the tokenizer defines them; fall back to the raw text
+     prompt = (tokenizer.cls_token or "") + user_message + (tokenizer.sep_token or "")
+     inputs = tokenizer(prompt, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=128,
+             pad_token_id=tokenizer.eos_token_id,
+             do_sample=True,
+             top_p=0.9,
+             temperature=0.8
+         )
+     # Decode only the newly generated tokens
+     reply = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
+     history = (history or []) + [(user_message, reply)]
+     return history, ""
+
+ # 3) Build Gradio Chat UI
+ with gr.Blocks() as demo:
+     gr.Markdown("## Chat with Goldfish’s Central Kurdish (Arabic) Model")
+     chatbot = gr.Chatbot()
+     txt = gr.Textbox(placeholder="Type your message here...")
+     clear = gr.Button("Clear")
+     txt.submit(chat_fn, [txt, chatbot], [chatbot, txt])  # update the chat, clear the textbox
+     clear.click(lambda: None, None, chatbot)
+
+ # 4) Launch the app
+ if __name__ == "__main__":
+     demo.launch()
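
For a quick local sanity check outside the Gradio UI, the chat function can be exercised directly. This is a minimal sketch, not part of the commit: it assumes the file above is saved as app.py, that gradio, torch, and transformers are installed (e.g. via the Space's requirements.txt), and that importing app triggers the model download.

# sanity_check.py — hypothetical helper, not part of this commit
from app import chat_fn

history, cleared_box = chat_fn("سڵاو", [])   # "hello" in Central Kurdish
print(history[-1][1])                        # generated reply for the last turn
print(repr(cleared_box))                     # "" — value used to clear the textbox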