import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import pipeline

# Gemma is a gated model: log in to the Hub if a token is available
# (on a Hugging Face Space, set HF_TOKEN as a repository secret).
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)

# Use the first GPU when available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else -1

# The "-litert-preview" checkpoint is a LiteRT on-device export and cannot be
# loaded with transformers; use the standard instruction-tuned checkpoint.
pipe = pipeline("text-generation", model="google/gemma-3n-E4B-it", device=device)

def responder(prompt):
    # return_full_text=False strips the prompt echo from the output.
    respuesta = pipe(prompt, max_new_tokens=80, do_sample=True,
                     temperature=0.7, return_full_text=False)
    return respuesta[0]["generated_text"]

with gr.Blocks() as demo:
    gr.Markdown("## 🤖 AmInside 1.0\nYour free AI assistant on Hugging Face")
    entrada = gr.Textbox(label="Type your message")
    salida = gr.Textbox(label="Response")
    entrada.submit(fn=responder, inputs=entrada, outputs=salida)

demo.launch()
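
# A minimal sketch of running the demo locally (assumes the file is saved as
# app.py and that you have accepted the Gemma license on the Hub):
#   HF_TOKEN=hf_your_token python app.py
# On a Hugging Face Space, set HF_TOKEN as a repository secret instead of
# exporting it in the shell.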