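"""Gradio demo that counts objects in an image with a custom YOLOv5 model.

Users can sketch a closed region on the input image to restrict counting to
that area; each detection is marked with a dot and the total is reported.
"""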
import argparse

import cv2
import gradio as gr
import numpy as np
import torch
from PIL import Image

def parse_args(known=False):
    parser = argparse.ArgumentParser(description="Object Counting v1.0")
    parser.add_argument(
        "--yolov5_path",
        "-yp",
        default="ultralytics/yolov5",
        type=str,
        help="YOLOv5 repo for torch.hub (GitHub repo or local path)",
    )
    parser.add_argument(
        "--model_path",
        "-mp",
        default="model/yolov5n_rebar_kaggle.pt",
        type=str,
        help="path to the trained model weights",
    )
    parser.add_argument(
        "--nms_conf",
        "-conf",
        default=0.25,
        type=float,
        help="model NMS confidence threshold",
    )
    parser.add_argument(
        "--nms_iou",
        "-iou",
        default=0.1,
        type=float,
        help="model NMS IoU threshold",
    )
    parser.add_argument(
        "--device",
        "-dev",
        default="cpu",
        type=str,
        help="inference device: cuda or cpu",
    )
    parser.add_argument("--inference_size", "-isz", default=640, type=int, help="model inference size")
    parser.add_argument("--slider_step", "-ss", default=0.05, type=float, help="slider step")
    # parse_known_args ignores unrecognized argv entries (useful inside notebooks/Spaces)
    args = parser.parse_known_args()[0] if known else parser.parse_args()
    return args
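
# Example invocation (the script file name "app.py" is an assumption):
#   python app.py --model_path model/yolov5n_rebar_kaggle.pt --device cuda --nms_conf 0.3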

def image_roi(im):
    """Apply the user-drawn sketch mask as a region of interest (ROI)."""
    # `im` is the dict produced by the Gradio sketch tool: {"image": ..., "mask": ...}
    mask = np.array(im["mask"])
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        # Fill the first drawn contour and black out everything outside it
        mask = np.zeros(mask.shape, np.uint8)
        cnt = contours[0]
        mask = cv2.drawContours(mask, [cnt], 0, 255, -1)
        im = np.array(im["image"])
        im = cv2.bitwise_and(im, im, mask=mask)
        im = Image.fromarray(im)
    else:
        # No sketch drawn: use the whole image
        im = im["image"]
    return im
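
# Note: image_roi keeps only the first contour returned by cv2.findContours.
# If a user draws several strokes, a more robust alternative (a sketch, not
# the original behavior) would be to keep the largest contour by area:
#   cnt = max(contours, key=cv2.contourArea)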

def yolo(im, conf, iou, size):
    global model
    if im is None:
        print("No image")
        return None, None
    im = image_roi(im)
    model.conf = conf  # NMS confidence threshold
    model.iou = iou  # NMS IoU threshold
    results = model(im, size=size)  # custom inference size
    output_im = np.array(im)
    # Per-image detections as a DataFrame (xmin, ymin, xmax, ymax, confidence, class, name)
    pred = results.pandas().xyxy[0]
    counting = pred.shape[0]
    text = f"{counting} objects"
    # Mark each detection with a filled red dot at the box center
    for _, row in pred.iterrows():
        center = (int((row["xmin"] + row["xmax"]) * 0.5), int((row["ymin"] + row["ymax"]) * 0.5))
        radius = int((row["xmax"] - row["xmin"]) * 0.5 * 0.6)
        cv2.circle(output_im, center, radius, (255, 0, 0), -1)
    return Image.fromarray(output_im), text
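
# Direct (non-UI) usage sketch, assuming `model` has already been loaded via
# torch.hub as in main(). An all-black mask means "no ROI was drawn", so
# image_roi falls back to the full image (paths/values are illustrative):
#   img = Image.open("./images/S__275668998.jpg")
#   annotated, count_text = yolo({"image": img, "mask": Image.new("RGB", img.size)}, 0.25, 0.1, 640)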

def main(args):
    gr.close_all()
    global model
    yolo_path = args.yolov5_path
    model_path = args.model_path
    nms_conf = args.nms_conf
    nms_iou = args.nms_iou
    device = args.device
    inference_size = args.inference_size
    slider_step = args.slider_step
    # Load the custom-trained weights through the YOLOv5 torch.hub entry point
    model = torch.hub.load(yolo_path, "custom", path=model_path, device=device)
    inputs_image = gr.Image(tool="sketch", label="Original Image", type="pil")
    inputs_conf = gr.Slider(0, 1, step=slider_step, value=nms_conf, label="Conf Thres")
    inputs_iou = gr.Slider(0, 1, step=slider_step, value=nms_iou, label="IoU Thres")
    inputs_size = gr.Slider(384, 1536, step=128, value=inference_size, label="Inference Size")
    inputs = [inputs_image, inputs_conf, inputs_iou, inputs_size]
    outputs_image = gr.Image(type="pil", label="Output Image")
    outputs_text = gr.Textbox(label="Number of objects")
    outputs = [outputs_image, outputs_text]
    title = "OBJECT COUNTING DEMO"
    description = "Upload an image or click an example image to use. You can select the area to count by drawing a closed area on the input image."
    article = "<p style='text-align: center'>Counting objects in an image</p>"
    examples = [["./images/S__275668998.jpg"], ["./images/S__275669003.jpg"], ["./images/S__275669004.jpg"]]
    gr.Interface(
        fn=yolo,
        inputs=inputs,
        outputs=outputs,
        title=title,
        description=description,
        article=article,
        examples=examples,
        cache_examples=False,
        analytics_enabled=False,
    ).launch(debug=True)  # add share=True for a public link

if __name__ == "__main__":
    args = parse_args()
    main(args)