saliacoel committed (verified)
Commit 0c32807 · 1 Parent(s): aa24117

Update app.py

Files changed (1):
  1. app.py +92 -43
app.py CHANGED
@@ -1,70 +1,119 @@
 import os
-import spaces
 import gradio as gr
-from detection import EyesDetection  # your existing class
 
 # ──────────────────────────────────────────────────────────────
-# 1. Instantiate the detector once (so the model loads just once)
 # ──────────────────────────────────────────────────────────────
-eyes_detector = EyesDetection()
 
 # ──────────────────────────────────────────────────────────────
-# 2. Top-level GPU function (required by ZeroGPU)
-#    – Must receive every argument the detector needs.
 # ──────────────────────────────────────────────────────────────
-@spaces.GPU
-def run_detection(img, model_name):
     """
-    img        : numpy array / PIL.Image from gr.Image
-    model_name : str chosen by the user (e.g. 'anime-eyes')
     """
-    return eyes_detector.detect(img, model_name=model_name)
 
 # ──────────────────────────────────────────────────────────────
-# 3. Build the Gradio UI
 # ──────────────────────────────────────────────────────────────
-GLOBAL_CSS = """
-.limit-height { max-height: 55vh; }
-"""
 
-def build_ui():
-    with gr.Blocks(css=GLOBAL_CSS) as demo:
-        # ─ Header ───────────────────────────────────────────────
         with gr.Row():
             gr.HTML(
-                "<h2 style='text-align:center'>Anime Object (Eyes) Detection</h2>"
             )
             gr.Markdown(
-                "Online demo for the eye-detection functions in "
-                "[imgutils.detect](https://dghs-imgutils.deepghs.org/main/api_doc/detect/index.html). "
                 "Install locally with `pip install dghs-imgutils`."
             )
 
-        # ─ Detection tab ────────────────────────────────────────
-        with gr.Tab("Eyes Detection"):
-            with gr.Row():
-                input_img = gr.Image(type="numpy", label="Upload image")
-                output = gr.JSON(label="Detection result")
-            with gr.Row():
-                model_dropdown = gr.Dropdown(
-                    choices=["anime-eyes", "cartoon-eyes", "real-eyes"],
-                    value="anime-eyes",
-                    label="Model",
-                )
-                run_btn = gr.Button("Detect Eyes")
-
-            run_btn.click(
-                fn=run_detection,
-                inputs=[input_img, model_dropdown],
-                outputs=output,
-            )
 
     return demo
 
 # ──────────────────────────────────────────────────────────────
-# 4. Launch
 # ──────────────────────────────────────────────────────────────
 if __name__ == "__main__":
-    app = build_ui()
-    # Keep queue size small (ZeroGPU sessions are short-lived)
-    app.queue(concurrency_count=os.cpu_count()).launch()
+"""
+app.py – Anime Object-Detection Space (ZeroGPU ready)
+
+• Gradio ≥ 4.44 (no more `concurrency_count=`).
+• Pydantic pinned (>=2.10.0,<2.11) to avoid schema bug.
+• One global @spaces.GPU wrapper so ZeroGPU is happy.
+• Each detector class gets its own tab via .make_ui().
+"""
+
 import os
 import gradio as gr
+import spaces
+
+# ---- your existing detector classes --------------------------
+from detection import (
+    EyesDetection, FaceDetection, HeadDetection, PersonDetection,
+    HandDetection, CensorDetection, HalfBodyDetection,
+    NudeNetDetection, BooruYOLODetection,
+)
 
 # ──────────────────────────────────────────────────────────────
+# 0. Instantiate detectors once (they cache their models)
 # ──────────────────────────────────────────────────────────────
+face_det      = FaceDetection()
+head_det      = HeadDetection()
+person_det    = PersonDetection()
+halfbody_det  = HalfBodyDetection()
+eyes_det      = EyesDetection()
+hand_det      = HandDetection()
+censor_det    = CensorDetection()
+nudenet_det   = NudeNetDetection()
+booruyolo_det = BooruYOLODetection()
+
+# A mapping so the GPU wrapper can call the right detector
+DETECTORS = {
+    "face"      : face_det,
+    "head"      : head_det,
+    "person"    : person_det,
+    "halfbody"  : halfbody_det,
+    "eyes"      : eyes_det,
+    "hand"      : hand_det,
+    "censor"    : censor_det,
+    "nudenet"   : nudenet_det,
+    "booruyolo" : booruyolo_det,
+}
 
 # ──────────────────────────────────────────────────────────────
+# 1. Single top-level GPU function (ZeroGPU REQUIREMENT)
 # ──────────────────────────────────────────────────────────────
+@spaces.GPU  # <- makes HF allocate a ZeroGPU worker
+def run_detection(img, det_key, model_name=None):
     """
+    Parameters
+    ----------
+    img        : PIL.Image | numpy.ndarray – image from gr.Image
+    det_key    : str – one of DETECTORS.keys()
+    model_name : str | None – optional model override
     """
+    detector = DETECTORS[det_key]
+    # Every detector already exposes .detect(img, model_name=...)
+    return detector.detect(img, model_name=model_name)
 
 # ──────────────────────────────────────────────────────────────
+# 2. Build the UI (mirrors the working public Space layout)
 # ──────────────────────────────────────────────────────────────
+_GLOBAL_CSS = ".limit-height { max-height: 55vh; }"
 
+def build_ui() -> gr.Blocks:
+    with gr.Blocks(css=_GLOBAL_CSS) as demo:
         with gr.Row():
             gr.HTML(
+                "<h2 style='text-align:center'>Object Detections For Anime</h2>"
             )
             gr.Markdown(
+                "Online demo for detection functions of "
+                "[imgutils.detect](https://dghs-imgutils.deepghs.org/main/api_doc/detect/index.html). \n"
                 "Install locally with `pip install dghs-imgutils`."
             )
 
+        with gr.Row():
+            with gr.Tabs():
+
+                # ---- each tab reuses the detector's built-in UI -----------
+                with gr.Tab("Face Detection"):
+                    face_det.make_ui()
+
+                with gr.Tab("Head Detection"):
+                    head_det.make_ui()
+
+                with gr.Tab("Person Detection"):
+                    person_det.make_ui()
+
+                with gr.Tab("Half Body Detection"):
+                    halfbody_det.make_ui()
+
+                with gr.Tab("Eyes Detection"):
+                    eyes_det.make_ui()
+
+                with gr.Tab("Hand Detection"):
+                    hand_det.make_ui()
+
+                with gr.Tab("Censor Point Detection"):
+                    censor_det.make_ui()
+
+                with gr.Tab("NudeNet"):
+                    nudenet_det.make_ui()
+
+                with gr.Tab("BooruYOLO"):
+                    booruyolo_det.make_ui()
 
     return demo
 
 # ──────────────────────────────────────────────────────────────
+# 3. Launch (Gradio ≥4 syntax)
 # ──────────────────────────────────────────────────────────────
 if __name__ == "__main__":
+    demo = build_ui()
+    # default_concurrency_limit → replaces old concurrency_count
+    demo.queue(default_concurrency_limit=os.cpu_count()).launch()
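
The new layout leans on two methods that every class imported from `detection` is expected to provide: `detect(img, model_name=...)`, which returns the raw detection result, and `make_ui()`, which builds that detector's widgets inside the surrounding `gr.Blocks`/`gr.Tab` context. `detection.py` is not part of this commit, so the sketch below is only a guess at that interface; the class name, default model string, and returned payload are all assumptions.

```python
# Hypothetical sketch of the detector interface app.py relies on.
# detection.py is not shown in this commit; names, defaults and the
# returned payload here are assumptions, not the real implementation.
import gradio as gr


class EyesDetectionSketch:
    """One detector: runs inference and renders its own tab contents."""

    def __init__(self, default_model: str = "eyes-default"):  # assumed default
        self.default_model = default_model

    def detect(self, img, model_name=None):
        # The real class presumably calls into dghs-imgutils here; this
        # stub only mirrors the expected call signature and result shape.
        model = model_name or self.default_model
        return {"model": model, "detections": []}

    def make_ui(self):
        # Must be called inside an active gr.Blocks()/gr.Tab() context,
        # exactly as build_ui() does for each tab.
        with gr.Row():
            img_in = gr.Image(type="pil", label="Input image")
            result = gr.JSON(label="Detections")
        gr.Button("Detect").click(fn=self.detect, inputs=[img_in], outputs=[result])
```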
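On the ZeroGPU side, the commit keeps a single module-level function decorated with `@spaces.GPU`, and anything that should run on the GPU worker is meant to be routed through it. Because the tabs wire their own events inside `make_ui()`, that wiring is not visible in the diff; the following is a minimal, self-contained illustration of the pattern with a stub detector and illustrative component names, not code from this Space.

```python
# Minimal illustration of the ZeroGPU pattern used in app.py: one
# top-level @spaces.GPU function that UI events call into.
# The detector is a stub; only the wiring mirrors the real app.
import gradio as gr
import spaces


@spaces.GPU  # ZeroGPU allocates a GPU only for the duration of this call
def run_detection(img, det_key):
    # Stand-in for DETECTORS[det_key].detect(img) in the real app.
    return {"detector": det_key, "detections": []}


with gr.Blocks() as demo:
    img_in = gr.Image(type="pil", label="Input image")
    result = gr.JSON(label="Result")
    gr.Button("Detect eyes").click(
        fn=lambda img: run_detection(img, "eyes"),  # "eyes" = a DETECTORS key
        inputs=[img_in],
        outputs=[result],
    )

if __name__ == "__main__":
    # Gradio >= 4: per-event concurrency is set via default_concurrency_limit.
    demo.queue(default_concurrency_limit=2).launch()
```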