import os

import torch
import gradio as gr
from PIL import Image
from torchvision import transforms

# Download an example image for the demo
torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")

# Load pretrained AlexNet from torchvision via torch.hub and put it in eval mode
model = torch.hub.load('pytorch/vision:v0.9.0', 'alexnet', pretrained=True)
model.eval()

# Download the ImageNet class labels used to name the predictions
os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")


def inference(input_image):
    # Standard ImageNet preprocessing: resize, center-crop, convert to tensor, normalize
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0)  # add a batch dimension

    # Move the input and model to GPU if one is available
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')

    with torch.no_grad():
        output = model(input_batch)

    # Convert logits to class probabilities
    probabilities = torch.nn.functional.softmax(output[0], dim=0)

    # Read the ImageNet class labels
    with open("imagenet_classes.txt", "r") as f:
        categories = [s.strip() for s in f.readlines()]

    # Return the top-5 classes and their probabilities as a label -> confidence dict
    top5_prob, top5_catid = torch.topk(probabilities, 5)
    result = {}
    for i in range(top5_prob.size(0)):
        result[categories[top5_catid[i]]] = top5_prob[i].item()
    return result
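
# Optional sanity check: uncomment to print the top-5 predictions for the example
# image before launching the web UI.
# print(inference(Image.open("dog.jpg").convert("RGB")))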

# Gradio interface: PIL image in, top-5 label confidences out (Gradio 2.x-style inputs/outputs API)
inputs = gr.inputs.Image(type='pil')
outputs = gr.outputs.Label(type="confidences", num_top_classes=5)

title = "AlexNet"
description = "Gradio demo for AlexNet, the 2012 ImageNet winner, which achieved a top-5 error of 15.3%, more than 10.8 percentage points lower than that of the runner-up. To use it, simply upload your image, or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1404.5997'>One weird trick for parallelizing convolutional neural networks</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py'>Github Repo</a></p>"

examples = [
    ['dog.jpg']
]

gr.Interface(
    inference,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    analytics_enabled=False,
).launch()