# ZamAI-mT5-Pashto — Gradio Space demo app.
# NOTE(review): the original file began with "Spaces: / Sleeping / Sleeping" —
# Hugging Face Spaces UI status text captured when the page was copied, not
# part of the program. Preserved here as a comment so the file stays valid Python.
import gradio as gr
from transformers import pipeline

# Load the fine-tuned model from the Hugging Face Hub.
MODEL_ID = "tasal9/ZamAI-mT5-Pashto"

# text2text-generation pipeline for the mT5 seq2seq model.
# device=-1 pins inference to CPU — this Space runs without a GPU.
generator = pipeline(
    "text2text-generation",
    model=MODEL_ID,
    tokenizer=MODEL_ID,
    device=-1,  # CPU only
)
| # Prompt template | |
def generate_prompt(instruction, input_text=""):
    """Build an Alpaca-style prompt for the instruction-tuned model.

    Args:
        instruction: The task description the user typed.
        input_text: Optional additional context; when empty (the default),
            the shorter no-input template is used.

    Returns:
        The fully formatted prompt string, ending with "### Response:" so the
        model continues from there.
    """
    if input_text:
        return (
            "Below is an instruction that describes a task, paired with an input that provides further context. "
            "Write a response that appropriately completes the request.\n\n"
            f"### Instruction:\n{instruction}\n\n"
            f"### Input:\n{input_text}\n\n"
            "### Response:"
        )
    # No input supplied: template omits the "### Input:" section entirely.
    return (
        "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
        f"### Instruction:\n{instruction}\n\n"
        "### Response:"
    )
| # Inference function | |
def predict(instruction, input_text):
    """Generate a model response for an instruction/input pair.

    Args:
        instruction: Task description from the "Instruction" textbox.
        input_text: Optional extra context from the "Input" textbox
            (may be an empty string).

    Returns:
        The generated response text from the pipeline's first (and, with
        beam search returning a single sequence, only) candidate.
    """
    prompt = generate_prompt(instruction, input_text)
    # Beam search (5 beams) with early stopping for more deterministic,
    # higher-quality output than sampling on a CPU-only Space.
    outputs = generator(
        prompt,
        max_length=256,
        num_beams=5,
        early_stopping=True,
    )
    return outputs[0]["generated_text"]
| # Gradio interface | |
# Gradio interface.
# Uses the modern top-level components (gr.Textbox): the gr.inputs /
# gr.outputs namespaces were deprecated in Gradio 3.0 and removed in 4.x,
# so the original calls raise AttributeError on current Gradio releases.
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter instruction here...", label="Instruction"),
        gr.Textbox(lines=2, placeholder="Enter optional input here...", label="Input"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="ZamAI mT5 Pashto Demo",
    description="A zero-GPU Gradio demo for the ZamAI-mT5-Pashto model.",
)

if __name__ == "__main__":
    iface.launch()