import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Checkpoint used for both tokenization and generation.
model_name = 'abinayam/gpt-2-tamil'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference only — disable dropout


def correct_text(input_text):
    """Generate a "corrected" continuation of *input_text* with the Tamil GPT-2 model.

    NOTE(review): this is a plain causal LM, so the output is a sampled
    continuation conditioned on the input rather than a true edit of it —
    confirm the model card before presenting this as spell/grammar correction.

    Args:
        input_text: Raw Tamil text entered by the user.

    Returns:
        The decoded model output (prompt + generated tokens) with special
        tokens stripped.
    """
    # Tokenize the input text.
    input_ids = tokenizer.encode(input_text, return_tensors='pt')

    # Generate text. do_sample=True is required for temperature to have any
    # effect (greedy decoding ignores it); max_new_tokens bounds only the
    # generated portion, so long prompts still get output; GPT-2 has no pad
    # token, so reuse EOS to silence the generate() warning.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_new_tokens=100,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the generated sequence back to a plain string.
    corrected_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return corrected_text


# Create the Gradio interface.
iface = gr.Interface(
    fn=correct_text,
    inputs=gr.Textbox(lines=5, placeholder="Enter Tamil text here..."),
    outputs=gr.Textbox(label="Corrected Text"),
    title="Tamil Spell Corrector and Grammar Checker",
    description="This app uses the 'abinayam/gpt-2-tamil' model to correct spelling and grammar in Tamil text.",
)

# Launch only when run as a script, not on import.
if __name__ == "__main__":
    iface.launch()