import torch
import torch.nn.functional as F
import gradio as gr
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

# Fine-tuned ViT classifier, plus the image processor from the ViT base
# checkpoint, which handles resizing and normalization of input images.
model = ViTForImageClassification.from_pretrained('Tirath5504/IPD-Image-ViT-Finetune')
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')

# Class labels, ordered to match the model's output indices.
class_names = ['cut_throat_gesture', 'finger_gun_to_the_head', 'middle_finger', 'slanted_eyes_gesture', 'swastika']


def predict(image):
    # Preprocess the PIL image into model-ready pixel values.
    inputs = processor(images=image, return_tensors="pt")

    # Forward pass without gradient tracking.
    with torch.no_grad():
        outputs = model(**inputs).logits

    # Convert logits to probabilities and take the highest-scoring class.
    probabilities = F.softmax(outputs, dim=1)
    predicted_class_idx = probabilities.argmax(-1).item()
    predicted_class = class_names[predicted_class_idx]
    confidence_score = probabilities[0][predicted_class_idx].item()

    return predicted_class, confidence_score


iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(num_top_classes=1, label="Class"), gr.Label(label="Score")],
    title="Hateful Content Detection",
    description="Upload an image to classify hateful gestures or symbols",
)

if __name__ == "__main__":
    iface.launch()
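
# A minimal usage sketch outside the Gradio UI, assuming a local image file
# named "example.jpg" (the filename is illustrative, not part of the app):
#
#   from PIL import Image
#   label, score = predict(Image.open("example.jpg"))
#   print(f"{label}: {score:.3f}")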