"""Minimal Gradio demo for text generation with Alpha-VLLM/Lumina-mGPT-7B-768."""

import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoProcessor

MODEL_ID = "Alpha-VLLM/Lumina-mGPT-7B-768"

# Load processor and model once at startup so each request reuses them.
# NOTE(review): Lumina-mGPT is a decoder-style multimodal GPT; verify that
# AutoModelForSeq2SeqLM is the correct auto-class for this checkpoint —
# AutoModelForCausalLM (or the model's dedicated class) may be required.
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)


def generate_text(input_text, max_new_tokens=256):
    """Generate a model continuation for *input_text*.

    Args:
        input_text: Prompt string entered by the user.
        max_new_tokens: Upper bound on generated tokens. The previous code
            relied on generate()'s default (~20 tokens), which silently
            truncated output; an explicit, larger cap is used instead.

    Returns:
        The decoded generation with special tokens stripped.
    """
    inputs = processor(input_text, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return processor.decode(outputs[0], skip_special_tokens=True)


# Gradio interface wiring: one text box in, one text box out.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Generated Output"),
    title="Lumina mGPT-7B-768",
    description="Enter text to generate output using Lumina-mGPT-7B-768.",
)

# Guard the server launch so importing this module does not start Gradio.
if __name__ == "__main__":
    iface.launch()