Spaces: Runtime error
Commit: trying to fix missing 1 required positional argument
app.py CHANGED
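The runtime error being addressed is most likely a TypeError raised when Gradio invokes inference() with only the image, because no second input component was wired up for threshold. A minimal sketch of that failure mode (an assumption for illustration, not code from the repo):

# inference() declares two required parameters, but only the image is passed.
def inference(img, threshold):
    return img, 0

try:
    inference("image")  # second argument omitted, as a one-input interface would do
except TypeError as e:
    print(e)  # inference() missing 1 required positional argument: 'threshold'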
@@ -4,15 +4,13 @@ import torch
 import torchvision
 import gradio as gr
 
-
-description = "Person detection, you can twik the corresponding confidence threshold. Good results even when face not visible."
-article = "<p style='text-align: center'><a href='https://github.com/scoutant/yolo-persons-gradio' target='_blank' class='footer'>Github Repo</a></p>"
+article = "<p style='text-align: center'><a href='https://github.com/scoutant/yolo-person-gradio' target='_blank' class='footer'>Github Repo</a></p>"
 
 model = torch.hub.load('ultralytics/yolov5', 'yolov5l')
 model.classes = [ 0 ] # only considering class 'person' and not the 79 other classes...
 model.conf = 0.6 # only considering detection above the threshold.
 
-def inference(img:PIL.Image.Image, threshold):
+def inference(img:PIL.Image.Image, threshold:float=0.6):
     if img is None:
         return None,0
     images:List[PIL.Image.Image] = [ img ] # inference operates on a list of images
@@ -24,16 +22,10 @@ def inference(img:PIL.Image.Image, threshold):
 
 gr.Interface(
     fn = inference,
-    inputs = [
-
-
-
-    outputs = [
-        gr.components.Image(type="pil", label="Output"),
-        gr.components.Label(label="nb of persons detected for given confidence threshold")
-    ],
-    title=title,
-    description=description,
+    inputs = [ gr.inputs.Image(type="pil", label="Input"), gr.Slider(minimum=0.5, maximum=0.9, step=0.05, value=0.7, label="Confidence threshold") ],
+    outputs = [ gr.components.Image(type="pil", label="Output"), gr.components.Label(label="nb of persons detected for given confidence threshold") ],
+    title="Person detection with YOLO v5",
+    description="Person detection, you can twik the corresponding confidence threshold. Good results even when face not visible.",
     article=article,
     examples=[['data/businessmen-612.jpg'], ['data/businessmen-back.jpg']],
     enable_queue=True,
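For reference, a minimal end-to-end sketch of how the fixed wiring could look. The body of inference() below is hypothetical, since the diff does not show the detection and rendering code between the two hunks, and the sketch assumes a Gradio release exposing gr.Image, gr.Slider and gr.Label at the top level (the commit itself uses the older gr.inputs / gr.components spellings and enable_queue):

import PIL.Image
import torch
import gradio as gr

model = torch.hub.load('ultralytics/yolov5', 'yolov5l')
model.classes = [0]  # keep only the 'person' class
model.conf = 0.6     # default confidence threshold

def inference(img: PIL.Image.Image, threshold: float = 0.6):
    # Hypothetical body: the repo's actual implementation is elided from the diff.
    if img is None:
        return None, 0
    model.conf = threshold                                 # apply the slider value
    results = model([img])                                 # YOLOv5 hub models accept a list of PIL images
    annotated = PIL.Image.fromarray(results.render()[0])   # detections drawn on the image
    count = len(results.xyxy[0])                           # number of persons above the threshold
    return annotated, count

gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(type="pil", label="Input"),
        gr.Slider(minimum=0.5, maximum=0.9, step=0.05, value=0.7, label="Confidence threshold"),
    ],
    outputs=[
        gr.Image(type="pil", label="Output"),
        gr.Label(label="nb of persons detected for given confidence threshold"),
    ],
    title="Person detection with YOLO v5",
    description="Person detection; you can tweak the confidence threshold. Good results even when the face is not visible.",
).launch()

With a default value on threshold, the function also stops failing if the interface ever calls it with the image alone, which matches the intent of the commit message.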