"""
app.py  –  Anime Object-Detection Space (ZeroGPU ready)

• Gradio ≥ 4.44 (no more `concurrency_count=`).
• Pydantic pinned (>=2.10.0,<2.11) to avoid a schema bug.
• One global @spaces.GPU wrapper so ZeroGPU is happy.
• Each detector class gets its own tab via .make_ui().
"""

import os
import gradio as gr
import spaces

# ---- your existing detector classes --------------------------
from detection import (
    EyesDetection, FaceDetection, HeadDetection, PersonDetection,
    HandDetection, CensorDetection, HalfBodyDetection,
    NudeNetDetection, BooruYOLODetection,
)

# ──────────────────────────────────────────────────────────────
# 0.  Instantiate detectors once (they cache their models)
# ──────────────────────────────────────────────────────────────
face_det       = FaceDetection()
head_det       = HeadDetection()
person_det     = PersonDetection()
halfbody_det   = HalfBodyDetection()
eyes_det       = EyesDetection()
hand_det       = HandDetection()
censor_det     = CensorDetection()
nudenet_det    = NudeNetDetection()
booruyolo_det  = BooruYOLODetection()

# A mapping so the GPU wrapper can call the right detector
DETECTORS = {
    "face"      : face_det,
    "head"      : head_det,
    "person"    : person_det,
    "halfbody"  : halfbody_det,
    "eyes"      : eyes_det,
    "hand"      : hand_det,
    "censor"    : censor_det,
    "nudenet"   : nudenet_det,
    "booruyolo" : booruyolo_det,
}

# ──────────────────────────────────────────────────────────────
# 1.  Single top-level GPU function   (ZeroGPU REQUIREMENT)
# ──────────────────────────────────────────────────────────────
@spaces.GPU            # <- makes HF allocate a ZeroGPU worker
def run_detection(img, det_key, model_name=None):
    """
    Parameters
    ----------
    img       : PIL.Image | numpy.ndarray  – image from gr.Image
    det_key   : str  – one of DETECTORS.keys()
    model_name: str | None – optional model override
    """
    detector = DETECTORS[det_key]
    # Every detector already exposes .detect(img, model_name=...)
    return detector.detect(img, model_name=model_name)
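
# Illustrative only (not part of the original layout): if a detector did not
# ship its own .make_ui(), a tab could be wired to the GPU wrapper by hand
# along these lines. The component names ("image_in", "results_out", "run_btn")
# and this helper are hypothetical; it is never called in this Space and would
# have to run inside build_ui()'s gr.Blocks()/gr.Tabs() context.
def _example_manual_tab(det_key: str, title: str) -> None:
    with gr.Tab(title):
        image_in = gr.Image(type="pil", label="Input image")
        results_out = gr.JSON(label="Detections")
        run_btn = gr.Button("Detect")
        # Bind the shared @spaces.GPU entry point; det_key selects the detector.
        run_btn.click(
            fn=lambda img, key=det_key: run_detection(img, key),
            inputs=image_in,
            outputs=results_out,
        )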

# ──────────────────────────────────────────────────────────────
# 2.  Build the UI (mirrors the working public Space layout)
# ──────────────────────────────────────────────────────────────
_GLOBAL_CSS = ".limit-height { max-height: 55vh; }"

def build_ui() -> gr.Blocks:
    with gr.Blocks(css=_GLOBAL_CSS) as demo:
        with gr.Row():
            gr.HTML(
                "<h2 style='text-align:center'>Object Detections For Anime</h2>"
            )
        gr.Markdown(
            "Online demo for detection functions of "
            "[imgutils.detect](https://dghs-imgutils.deepghs.org/main/api_doc/detect/index.html).  \n"
            "Install locally with `pip install dghs-imgutils`."
        )

        with gr.Row():
            with gr.Tabs():

                # ---- each tab reuses the detector's built-in UI -----------
                with gr.Tab("Face Detection"):
                    face_det.make_ui()

                with gr.Tab("Head Detection"):
                    head_det.make_ui()

                with gr.Tab("Person Detection"):
                    person_det.make_ui()

                with gr.Tab("Half Body Detection"):
                    halfbody_det.make_ui()

                with gr.Tab("Eyes Detection"):
                    eyes_det.make_ui()

                with gr.Tab("Hand Detection"):
                    hand_det.make_ui()

                with gr.Tab("Censor Point Detection"):
                    censor_det.make_ui()

                with gr.Tab("NudeNet"):
                    nudenet_det.make_ui()

                with gr.Tab("BooruYOLO"):
                    booruyolo_det.make_ui()

    return demo

# ──────────────────────────────────────────────────────────────
# 3.  Launch (Gradio ≥4 syntax)
# ──────────────────────────────────────────────────────────────
if __name__ == "__main__":
    demo = build_ui()
    # default_concurrency_limit → replaces old concurrency_count
    demo.queue(default_concurrency_limit=os.cpu_count()).launch()