John6666 committed
Commit dc4542a · verified · 1 Parent(s): be2c613

Upload 5 files

Files changed (5):
  1. README.md +13 -12
  2. app.py +102 -0
  3. packages.txt +1 -0
  4. pre-requirements.txt +1 -0
  5. requirements.txt +20 -0
README.md CHANGED
@@ -1,12 +1,13 @@
- ---
- title: Yolotest
- emoji: 🐠
- colorFrom: indigo
- colorTo: red
- sdk: gradio
- sdk_version: 5.37.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: test YOLO
+ emoji: 🙄
+ colorFrom: indigo
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.40.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,102 @@
+ import os
+ import spaces
+ import gradio as gr
+ from functools import partial
+ from huggingface_hub import hf_hub_download
+
+ @spaces.GPU
+ def dummy_gpu():
+     pass
+
+ # https://github.com/R3gm/stablepy/blob/main/stablepy/diffusers_vanilla/adetailer.py
+ # =====================================
+ # Yolo
+ # =====================================
+ from pathlib import Path
+ import numpy as np
+ import torch
+ from huggingface_hub import hf_hub_download
+ from PIL import Image, ImageDraw
+ from torchvision.transforms.functional import to_pil_image
+ from ultralytics import YOLO
+
+ def create_mask_from_bbox(
+     bboxes: np.ndarray, shape: tuple[int, int]
+ ) -> list[Image.Image]:
+     """
+     Parameters
+     ----------
+     bboxes: list[list[float]]
+         list of [x1, y1, x2, y2]
+         bounding boxes
+     shape: tuple[int, int]
+         shape of the image (width, height)
+
+     Returns
+     -------
+     masks: list[Image.Image]
+         A list of masks
+
+     """
+     masks = []
+     for bbox in bboxes:
+         mask = Image.new("L", shape, "black")
+         mask_draw = ImageDraw.Draw(mask)
+         mask_draw.rectangle(bbox, fill="white")
+         masks.append(mask)
+     return masks
+
+
+ def mask_to_pil(masks: torch.Tensor, shape: tuple[int, int]) -> list[Image.Image]:
+     """
+     Parameters
+     ----------
+     masks: torch.Tensor, dtype=torch.float32, shape=(N, H, W).
+         The device can be CUDA, but `to_pil_image` takes care of that.
+
+     shape: tuple[int, int]
+         (width, height) of the original image
+
+     Returns
+     -------
+     images: list[Image.Image]
+     """
+     n = masks.shape[0]
+     return [to_pil_image(masks[i], mode="L").resize(shape) for i in range(n)]
+
+
+ def yolo_detector(
+     image: Image.Image, model_path: str | Path | None = None, confidence: float = 0.3
+ ) -> list[Image.Image] | None:
+     if not model_path:
+         model_path = hf_hub_download("Bingsu/adetailer", "face_yolov8n.pt")
+     model = YOLO(model_path)
+     pred = model(image, conf=confidence)
+
+     bboxes = pred[0].boxes.xyxy.cpu().numpy()
+     if bboxes.size == 0:
+         return None
+
+     if pred[0].masks is None:
+         masks = create_mask_from_bbox(bboxes, image.size)
+     else:
+         masks = mask_to_pil(pred[0].masks.data, image.size)
+
+     return masks
+
+ @spaces.GPU
+ def infer(text: str):
+     detectors = []
+     person_model_path = hf_hub_download("Bingsu/adetailer", "person_yolov8s-seg.pt")
+     person_detector = partial(yolo_detector, model_path=person_model_path)
+     detectors.append(person_detector)
+     return str(detectors)
+
+ with gr.Blocks() as demo:
+     input_text = gr.Textbox(label="Input", value="", show_copy_button=True)
+     run_button = gr.Button("Run", variant="primary")
+     output_text = gr.Textbox(label="Output", value="", show_copy_button=True)
+
+     run_button.click(infer, [input_text], [output_text])
+
+ demo.queue().launch()
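
The added app.py only exposes the detector through the Gradio demo's infer() stub. As a standalone illustration of the same detection path, the sketch below mirrors yolo_detector() without the Gradio UI or the @spaces.GPU decorator; the local file name input.jpg is a hypothetical placeholder, not part of this commit.

# Sketch only: mirrors the fallback path of yolo_detector() in app.py,
# outside the Gradio/ZeroGPU wrapper.
from huggingface_hub import hf_hub_download
from PIL import Image, ImageDraw
from ultralytics import YOLO

image = Image.open("input.jpg").convert("RGB")  # hypothetical local image

# Same face-detection checkpoint that yolo_detector() downloads when no
# model_path is given.
model_path = hf_hub_download("Bingsu/adetailer", "face_yolov8n.pt")
pred = YOLO(model_path)(image, conf=0.3)

bboxes = pred[0].boxes.xyxy.cpu().numpy()
if bboxes.size == 0:
    print("no detections")
else:
    # face_yolov8n.pt is a box detector, so build rectangle masks the same
    # way create_mask_from_bbox() does for box-only predictions.
    for i, bbox in enumerate(bboxes):
        mask = Image.new("L", image.size, "black")
        ImageDraw.Draw(mask).rectangle(bbox.tolist(), fill="white")
        mask.save(f"mask_{i}.png")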
packages.txt ADDED
@@ -0,0 +1 @@
+ git-lfs aria2 -y ffmpeg
pre-requirements.txt ADDED
@@ -0,0 +1 @@
+ pip>=23.0.0
requirements.txt ADDED
@@ -0,0 +1,20 @@
+ diffusers<=0.32.0
+ transformers==4.47.1
+ torch==2.4.0
+ numpy<2
+ gdown
+ opencv-python
+ torchvision
+ accelerate
+ optimum[onnxruntime]
+ dartrs
+ huggingface_hub
+ hf_transfer
+ hf_xet
+ translatepy
+ timm
+ rapidfuzz
+ sentencepiece
+ unidecode
+ ultralytics>=8.3.47
+ pydantic==2.10.6