Create app.py
app.py
ADDED
@@ -0,0 +1,44 @@
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-7B-Instruct"

# Load the model and tokenizer once at startup; device_map="auto" (which
# requires the accelerate package) places the weights on the available GPU.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)


@spaces.GPU
def generate(prompt):
    # Wrap the raw prompt in the chat format the model expects.
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
    )
    # Strip the prompt tokens so only the newly generated text is decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response


gr.Interface(
    fn=generate,
    inputs=gr.Text(),
    outputs=gr.Text(),
).launch()
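Once the Space is running, the Interface can also be queried programmatically. A minimal sketch using gradio_client, where the Space ID user/qwen-demo is a hypothetical placeholder (/predict is the default endpoint name for a gr.Interface):

from gradio_client import Client

client = Client("user/qwen-demo")  # hypothetical Space ID; replace with the real one
result = client.predict(
    "Give me a short introduction to large language models.",
    api_name="/predict",
)
print(result)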