import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# GGUF checkpoint to load; transformers dequantizes it to the requested dtype.
model_name = "mradermacher/YandexGPT-5-Lite-8B-instruct-GGUF"
filename = "YandexGPT-5-Lite-8B-instruct.Q8_0.gguf"
torch_dtype = torch.float32

tokenizer = AutoTokenizer.from_pretrained(model_name, gguf_file=filename)
model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=filename, torch_dtype=torch_dtype)

def generate_text(input_text):
    # Tokenize the prompt and sample up to 300 new tokens.
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        do_sample=True,
        temperature=0.7
    )
    # Note: outputs[0] still contains the prompt tokens, so the reply echoes the input text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt..."),
    outputs="text",
    title="Saiga YandexGPT 8B Demo",
    description="Ask the Saiga YandexGPT 8B model a question!"
)

interface.launch()
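The handler above passes the raw textbox string straight to the tokenizer. Since this is an instruct-tuned checkpoint, a minimal sketch like the following wraps the prompt in the model's chat format instead; it assumes the GGUF tokenizer ships a chat template (the `apply_chat_template` call itself is standard transformers API), and the `generate_chat_reply` name is illustrative, not part of the original script.

# Sketch: wrap the user message in the tokenizer's chat template (assumes the
# tokenizer provides one) and decode only the newly generated tokens.
def generate_chat_reply(user_message):
    messages = [{"role": "user", "content": user_message}]
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    )
    outputs = model.generate(input_ids, max_new_tokens=300, do_sample=True, temperature=0.7)
    # Slice off the prompt so the reply does not echo the input.
    return tokenizer.decode(outputs[0, input_ids.shape[-1]:], skip_special_tokens=True)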