import gradio as gr
from transformers import pipeline

# Choose a smaller model to avoid overloading the host
model_name = "google/flan-t5-small"  # example model
pipe = pipeline("text2text-generation", model=model_name)

def generate_text(prompt):
    # Run the pipeline and return only the generated string
    return pipe(prompt)[0]["generated_text"]

iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Text Generation API",
    description="Generates text from a prompt using a Hugging Face model.",
)

if __name__ == "__main__":
    # Bind to all interfaces on port 8080 so the app is reachable from outside the container/host
    iface.launch(server_name="0.0.0.0", server_port=8080)
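
# A minimal client-side sketch, assuming the server above is running on
# localhost:8080 and that the gradio_client package is installed. The
# api_name "/predict" is Gradio's default endpoint for a single Interface;
# adjust the URL/port if you launch with different settings.
#
#   from gradio_client import Client
#
#   client = Client("http://localhost:8080/")
#   result = client.predict("Summarize: Gradio makes ML demos easy.", api_name="/predict")
#   print(result)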