"""
app.py - Anime Object-Detection Space (ZeroGPU ready)

• Gradio ≥ 4.44 (no more `concurrency_count=`).
• Pydantic pinned (>=2.10.0,<2.11) to avoid schema bug.
• One global @spaces.GPU wrapper so ZeroGPU is happy.
• Each detector class gets its own tab via .make_ui().
"""
import os

import gradio as gr
import spaces

# ---- your existing detector classes --------------------------
from detection import (
    EyesDetection, FaceDetection, HeadDetection, PersonDetection,
    HandDetection, CensorDetection, HalfBodyDetection,
    NudeNetDetection, BooruYOLODetection,
)
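# The pins referenced in the module docstring would live in requirements.txt.
# A minimal sketch, assuming the versions named above (the Space's actual
# requirements file is not shown here):
#
#     gradio>=4.44.0
#     pydantic>=2.10.0,<2.11
#     spaces
#     dghs-imgutils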
# ──────────────────────────────────────────────────────────────
# 0. Instantiate detectors once (they cache their models)
# ──────────────────────────────────────────────────────────────
face_det = FaceDetection()
head_det = HeadDetection()
person_det = PersonDetection()
halfbody_det = HalfBodyDetection()
eyes_det = EyesDetection()
hand_det = HandDetection()
censor_det = CensorDetection()
nudenet_det = NudeNetDetection()
booruyolo_det = BooruYOLODetection()

# A mapping so the GPU wrapper can call the right detector
DETECTORS = {
    "face": face_det,
    "head": head_det,
    "person": person_det,
    "halfbody": halfbody_det,
    "eyes": eyes_det,
    "hand": hand_det,
    "censor": censor_det,
    "nudenet": nudenet_det,
    "booruyolo": booruyolo_det,
}
# ──────────────────────────────────────────────────────────────
# 1. Single top-level GPU function (ZeroGPU REQUIREMENT)
# ──────────────────────────────────────────────────────────────
@spaces.GPU  # <- makes HF allocate a ZeroGPU worker
def run_detection(img, det_key, model_name=None):
    """
    Parameters
    ----------
    img : PIL.Image | numpy.ndarray
        Image from gr.Image.
    det_key : str
        One of DETECTORS.keys().
    model_name : str | None
        Optional model override.
    """
    detector = DETECTORS[det_key]
    # Every detector already exposes .detect(img, model_name=...)
    return detector.detect(img, model_name=model_name)
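# Illustrative only, not part of the original layout: the tabs below reuse
# each detector's make_ui(), but run_detection could also be bound to plain
# Gradio components like this (component choices here are assumptions):
#
#     with gr.Tab("Face Detection"):
#         inp = gr.Image(type="pil")
#         out = gr.JSON(label="Detections")
#         gr.Button("Detect").click(
#             fn=lambda img: run_detection(img, "face"),
#             inputs=inp,
#             outputs=out,
#         )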
# ──────────────────────────────────────────────────────────────
# 2. Build the UI (mirrors the working public Space layout)
# ──────────────────────────────────────────────────────────────
_GLOBAL_CSS = ".limit-height { max-height: 55vh; }"


def build_ui() -> gr.Blocks:
    with gr.Blocks(css=_GLOBAL_CSS) as demo:
        with gr.Row():
            gr.HTML(
                "<h2 style='text-align:center'>Object Detections For Anime</h2>"
            )
        gr.Markdown(
            "Online demo for detection functions of "
            "[imgutils.detect](https://dghs-imgutils.deepghs.org/main/api_doc/detect/index.html). \n"
            "Install locally with `pip install dghs-imgutils`."
        )
        with gr.Row():
            with gr.Tabs():
                # ---- each tab reuses the detector's built-in UI -----------
                with gr.Tab("Face Detection"):
                    face_det.make_ui()
                with gr.Tab("Head Detection"):
                    head_det.make_ui()
                with gr.Tab("Person Detection"):
                    person_det.make_ui()
                with gr.Tab("Half Body Detection"):
                    halfbody_det.make_ui()
                with gr.Tab("Eyes Detection"):
                    eyes_det.make_ui()
                with gr.Tab("Hand Detection"):
                    hand_det.make_ui()
                with gr.Tab("Censor Point Detection"):
                    censor_det.make_ui()
                with gr.Tab("NudeNet"):
                    nudenet_det.make_ui()
                with gr.Tab("BooruYOLO"):
                    booruyolo_det.make_ui()
    return demo
# ──────────────────────────────────────────────────────────────
# 3. Launch (Gradio ≥4 syntax)
# ──────────────────────────────────────────────────────────────
if __name__ == "__main__":
    demo = build_ui()
    # default_concurrency_limit replaces the old concurrency_count argument
    demo.queue(default_concurrency_limit=os.cpu_count()).launch()
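# Local run (assumption: the `detection` module and its model weights are
# available): `python app.py`, then open the URL Gradio prints, which is
# http://127.0.0.1:7860 by default.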