|
import gradio as gr |
|
import spaces |
|
from peft import AutoPeftModelForCausalLM |
|
from transformers import AutoTokenizer |
|
|
|
|
|
# Alpaca-style prompt template with three slots: (instruction, input, response).
# At inference time the response slot is left empty ("") so the model
# completes the text after the "### Response:" header.
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""


# Fixed system instruction inserted into every prompt: the model was
# fine-tuned to write Russian blog posts in Artemiy Lebedev's style from a
# given post headline.
instruction_text = "You are a blogger named Artemiy Lebedev, your purpose is to generate a post in Russian based on the post article"
|
|
|
|
|
# Process-wide cache so the checkpoint is loaded from the Hub only once,
# not on every button click (re-loading a quantized LoRA model per request
# is by far the slowest part of the original implementation).
_MODEL_CACHE = {}


@spaces.GPU
def generate_response(input_text):
    """Generate a Lebedev-style blog post (in Russian) for a post headline.

    Parameters
    ----------
    input_text : str
        The post title/headline entered by the user.

    Returns
    -------
    str
        The generated post text, with the prompt scaffolding and the
        ``<|im_end|>`` end-of-turn marker stripped.
    """
    if "model" not in _MODEL_CACHE:
        # NOTE(review): `.to("cuda")` on a `load_in_4bit` model can raise with
        # newer bitsandbytes versions (quantized models manage their own
        # device placement) — confirm against the deployed dependency pins.
        _MODEL_CACHE["model"] = AutoPeftModelForCausalLM.from_pretrained(
            "shakaryan/lebedev_qwen2.5",
            load_in_4bit=True,
        ).to("cuda")
        _MODEL_CACHE["tokenizer"] = AutoTokenizer.from_pretrained(
            "shakaryan/lebedev_qwen2.5"
        )
    model = _MODEL_CACHE["model"]
    tokenizer = _MODEL_CACHE["tokenizer"]

    # Third slot is empty: the model fills in the response section itself.
    formatted_prompt = alpaca_prompt.format(instruction_text, input_text, "")

    inputs = tokenizer(formatted_prompt, return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_new_tokens=256, use_cache=True)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Keep only the text after the "### Response:" header.  Guard against the
    # marker being absent: `find()` would return -1 and the original
    # `-1 + len(marker)` arithmetic silently sliced from inside the prompt.
    marker = "### Response:"
    marker_pos = response.find(marker)
    if marker_pos != -1:
        response = response[marker_pos + len(marker):]
    return response.replace("<|im_end|>", "").strip()
|
|
|
|
|
# --- Gradio UI: headline in, generated post out, one trigger button. -------
with gr.Blocks() as demo:
    # App header and links (Russian), rendered as markdown.
    gr.Markdown(
        """
        ### Генератор постов в стиле Артемия Лебедева
        Этот генератор создает посты в стиле Артемия Лебедева.
        Попробуйте написать заголовок поста, и генератор создаст текст.
        Подробнее о стиле: [Артемий Лебедев в Telegram](https://t.me/temalebedev) \n
        Телеграм канал автора: [Гегам Шакарян - ИИ в лаваше](https://t.me/ai_in_lavash)
        """
    )

    with gr.Row():
        title_box = gr.Textbox(
            label="Заголовок поста",
            placeholder="Введите заголовок поста здесь...",
            lines=5,
        )

    with gr.Row():
        post_box = gr.Textbox(label="Сгенерированный пост", lines=10)

    with gr.Row():
        run_button = gr.Button("Сгенерировать")

    # Clicking the button feeds the headline to the generator and writes the
    # result into the output textbox.
    run_button.click(fn=generate_response, inputs=[title_box], outputs=post_box)


demo.launch()
|
|