import torch
import torch.nn.functional as F
import gradio as gr
from PIL import Image
from transformers import ViTImageProcessor, ViTForImageClassification

# Run on GPU when one is available; the model weights are moved there once at startup.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Local fine-tuned ViT checkpoint ("ViT_LCZs_v3"); local_files_only avoids any hub download.
processor = ViTImageProcessor.from_pretrained("ViT_LCZs_v3", local_files_only=True)
model = ViTForImageClassification.from_pretrained("ViT_LCZs_v3", local_files_only=True).to(device)
model.eval()  # inference-only app: set eval mode once instead of on every request


def classify_image(image):
    """Classify a PIL image into Local Climate Zone (LCZ) classes.

    Args:
        image: PIL.Image supplied by the Gradio ``gr.Image(type="pil")`` component.

    Returns:
        dict mapping the top-10 predicted class labels (from
        ``model.config.id2label``) to their softmax probabilities — the
        format expected by a ``gr.Label`` output.
    """
    with torch.no_grad():
        inputs = processor(images=image, return_tensors="pt")
        # BUG FIX: the original left the processed pixel tensors on CPU while
        # the model lives on `device`, which raises a device-mismatch error on
        # any CUDA machine. Move every input tensor to the model's device.
        inputs = {name: tensor.to(device) for name, tensor in inputs.items()}
        logits = model(**inputs).logits
        probs = F.softmax(logits, dim=1)
        top_prob, top_idx = torch.topk(probs, 10)
    return {
        model.config.id2label[int(top_idx[0][i])]: float(top_prob[0][i])
        for i in range(10)
    }


with gr.Blocks(title="ViT LCZ Classification - ClassCat",
               css=".gradio-container {background:white;}") as demo:
    gr.HTML("""
LCZ Classification with ViT
""")
    with gr.Row():
        input_image = gr.Image(type="pil")
        output_label = gr.Label(label="Probabilities", num_top_classes=3)
    send_btn = gr.Button("Classify")
    send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)
    with gr.Row():
        gr.Examples(['data/closed_highrise.png'], label='Closed highrise', inputs=input_image)
        # Typo fix in the visible example-group label: "Sparsey" -> "Sparsely".
        gr.Examples(['data/open_lowrise.png'], label='Sparsely built', inputs=input_image)
        gr.Examples(['data/dense_trees.png'], label='Dense trees', inputs=input_image)
        gr.Examples(['data/large_lowrise.png'], label='Large lowrise', inputs=input_image)

if __name__ == "__main__":
    # debug=True surfaces handler tracebacks in the browser/console during development.
    demo.launch(debug=True)