import gradio as gr
import torch
import spaces
from transformers import RobertaForSequenceClassification, RobertaTokenizer

# Load model and tokenizer
model_dir = "roberta_imdb_finetuned"
tokenizer = RobertaTokenizer.from_pretrained(model_dir)
model = RobertaForSequenceClassification.from_pretrained(model_dir).to("cuda")

# Set model to evaluation mode
model.eval()

# Define inference function
@spaces.GPU
def predict_sentiment(text):
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=512,
    ).to("cuda")
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
    prediction = torch.argmax(logits, dim=1).item()
    label_map = {0: "Negative", 1: "Positive"}
    return f"Predicted Sentiment: {label_map[prediction]}"

# Gradio Interface
gr.Interface(
    fn=predict_sentiment,
    inputs="text",
    outputs="text",
    title="Roberta Movie Review Classifier",
).launch()
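
# Quick sanity check (a minimal sketch, not part of the app itself; it assumes the
# fine-tuned checkpoint in "roberta_imdb_finetuned" and a CUDA device are available,
# and that calling the function outside the Gradio UI is acceptable for local testing):
#
#     result = predict_sentiment("A heartfelt, beautifully acted film.")
#     print(result)  # e.g. "Predicted Sentiment: Positive" for a well-trained checkpoint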