import gradio as gr
from transformers import pipeline
import torch
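
# Use the first GPU if one is available; device=-1 runs the pipeline on CPU.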
device = 0 if torch.cuda.is_available() else -1
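
# TinyLlama-1.1B-Chat is a small instruction-tuned model, light enough to run on modest, even CPU-only, hardware.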
pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", device=device)

def responder(prompt):
    # return_full_text=False keeps only the newly generated tokens,
    # so the reply does not echo the user's prompt back.
    respuesta = pipe(prompt, max_new_tokens=80, do_sample=True, temperature=0.7,
                     return_full_text=False)[0]['generated_text']
    return respuesta

with gr.Blocks() as demo:
    gr.Markdown("## 🤖 AmInside 1.0\nYour free AI assistant on Hugging Face")
    entrada = gr.Textbox(label="Type your message")
    salida = gr.Textbox(label="Response")
    entrada.submit(fn=responder, inputs=entrada, outputs=salida)

demo.launch()
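
# To publish this as a Hugging Face Space (a sketch, assuming the standard
# Gradio Space layout): save this file as app.py next to a requirements.txt
# that lists gradio, transformers, and torch.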