GIGAParviz committed on
Commit
c7bf5ca
·
verified ·
1 Parent(s): 71a51a0

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
+ import gc
4
+ import torch
5
+
6
def clear_memory():
    """Free Python garbage and, when a GPU is present, the CUDA cache.

    Called before each generation to keep memory pressure low between
    requests. Guarding on ``torch.cuda.is_available()`` avoids touching the
    CUDA runtime on CPU-only hosts — this app explicitly runs the model on
    CPU, so CUDA may not exist at all.
    """
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
9
+
10
# --- Model setup -----------------------------------------------------------
# Load the fine-tuned causal LM and its tokenizer from the Hub, pin the
# model to CPU, and wrap both in a text-generation pipeline capped at a
# 512-token total length.
model_name = "GIGAParviz/Firooze_test"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")

pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=512)
16
+
17
def generate_response(prompt):
    """Run *prompt* through the model using an Alpaca-style template.

    Returns only the model's answer text, with the echoed prompt removed.
    """
    clear_memory()
    prompt_template = f"### Instruction:\n{prompt}\n\n### Response:\n"
    outputs = pipe(prompt_template)
    full_text = outputs[0]['generated_text']
    # The pipeline echoes the input template at the start of the generated
    # text; slice it off so only the response portion is returned.
    return full_text[len(prompt_template):]
23
+
24
# --- Gradio UI -------------------------------------------------------------
# A single-column interface: prompt box, generate button, output box, and a
# clear button that resets both text fields.
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center;'>🔮 Persian LLM made by A.M.Parviz</h1>")

    user_prompt = gr.Textbox(label="Enter Prompt", placeholder="Type your prompt here...", lines=2)
    run_button = gr.Button("Generate Response")
    answer_box = gr.Textbox(label="Generated Response", lines=5)

    # Wire the button to the generation function.
    run_button.click(fn=generate_response, inputs=user_prompt, outputs=answer_box)

    # ClearButton registers itself with the Blocks context on construction;
    # no variable binding is needed.
    gr.ClearButton([user_prompt, answer_box])

demo.launch()