Commit 1e0ca0d
Parent(s): 677a6b5
test

Files changed:
- .gitattributes +2 -0
- README.md +7 -6
- app.py +279 -0
- example/cloth/cloth02.jpg +0 -0
- example/cloth/cloth_09133_00.jpg +0 -0
- example/human/human01.jpg +3 -0
- example/human/human02.jpg +3 -0
- requirements.txt +7 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+example/human/human01.jpg filter=lfs diff=lfs merge=lfs -text
+example/human/human02.jpg filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,13 +1,14 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Fashion Virtual Tryon
+emoji: 😻
+colorFrom: blue
+colorTo: gray
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.23.3
 app_file: app.py
 pinned: false
-license:
+license: mit
+short_description: Fashion Virtual Try-on
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,279 @@
import os
import cv2
import gradio as gr
import mediapipe as mp
import numpy as np
from PIL import Image
from gradio_client import Client, handle_file

# Set up paths
example_path = os.path.join(os.path.dirname(__file__), 'example')

garm_list = os.listdir(os.path.join(example_path, "cloth"))
garm_list_path = [os.path.join(example_path, "cloth", garm) for garm in garm_list]

human_list = os.listdir(os.path.join(example_path, "human"))
human_list_path = [os.path.join(example_path, "human", human) for human in human_list]

# Initialize MediaPipe Pose
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(static_image_mode=True)
mp_drawing = mp.solutions.drawing_utils
mp_pose_landmark = mp_pose.PoseLandmark

def detect_pose(image):
    # Convert to RGB
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Run pose detection
    result = pose.process(image_rgb)

    keypoints = {}

    if result.pose_landmarks:
        # Draw landmarks on image
        mp_drawing.draw_landmarks(image, result.pose_landmarks, mp_pose.POSE_CONNECTIONS)

        # Get image dimensions
        height, width, _ = image.shape

        # Extract specific landmarks
        landmark_indices = {
            'left_shoulder': mp_pose_landmark.LEFT_SHOULDER,
            'right_shoulder': mp_pose_landmark.RIGHT_SHOULDER,
            'left_hip': mp_pose_landmark.LEFT_HIP,
            'right_hip': mp_pose_landmark.RIGHT_HIP
        }

        for name, index in landmark_indices.items():
            lm = result.pose_landmarks.landmark[index]
            x, y = int(lm.x * width), int(lm.y * height)
            keypoints[name] = (x, y)

            # Draw a circle + label for debug
            cv2.circle(image, (x, y), 5, (0, 255, 0), -1)
            cv2.putText(image, name, (x + 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    return image

def align_clothing(body_img, clothing_img):
    image_rgb = cv2.cvtColor(body_img, cv2.COLOR_BGR2RGB)
    result = pose.process(image_rgb)
    output = body_img.copy()

    if result.pose_landmarks:
        h, w, _ = output.shape

        # Extract key points
        def get_point(landmark_id):
            lm = result.pose_landmarks.landmark[landmark_id]
            return int(lm.x * w), int(lm.y * h)

        left_shoulder = get_point(mp_pose_landmark.LEFT_SHOULDER)
        right_shoulder = get_point(mp_pose_landmark.RIGHT_SHOULDER)
        left_hip = get_point(mp_pose_landmark.LEFT_HIP)
        right_hip = get_point(mp_pose_landmark.RIGHT_HIP)

        # Destination box (torso region)
        dst_pts = np.array([
            left_shoulder,
            right_shoulder,
            right_hip,
            left_hip
        ], dtype=np.float32)

        # Source box (clothing image corners)
        src_h, src_w = clothing_img.shape[:2]
        src_pts = np.array([
            [0, 0],
            [src_w, 0],
            [src_w, src_h],
            [0, src_h]
        ], dtype=np.float32)

        # Compute perspective transform and warp
        matrix = cv2.getPerspectiveTransform(src_pts, dst_pts)
        warped_clothing = cv2.warpPerspective(clothing_img, matrix, (w, h), borderMode=cv2.BORDER_TRANSPARENT)

        # Handle transparency
        if clothing_img.shape[2] == 4:
            alpha = warped_clothing[:, :, 3] / 255.0
            for c in range(3):
                output[:, :, c] = (1 - alpha) * output[:, :, c] + alpha * warped_clothing[:, :, c]
        else:
            output = cv2.addWeighted(output, 0.8, warped_clothing, 0.5, 0)

    return output

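# Note: detect_pose and align_clothing are standalone helpers; they are not
# wired into the Gradio UI below, which delegates the actual try-on to the
# remote Leffa Space via process_image. A minimal local sketch of the warp
# helper, using the example images shipped in this commit:
#
#   body = cv2.imread("example/human/human01.jpg")
#   garment = cv2.imread("example/cloth/cloth02.jpg", cv2.IMREAD_UNCHANGED)
#   preview = align_clothing(body, garment)
#   cv2.imwrite("preview.jpg", preview)
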
def process_image(human_img_path, garm_img_path):
    # Delegate the try-on to the public Leffa Space via its API
    client = Client("franciszzj/Leffa")

    result = client.predict(
        src_image_path=handle_file(human_img_path),
        ref_image_path=handle_file(garm_img_path),
        ref_acceleration=False,
        step=30,
        scale=2.5,
        seed=42,
        vt_model_type="viton_hd",
        vt_garment_type="upper_body",
        vt_repaint=False,
        api_name="/leffa_predict_vt"
    )

    print(result)
    generated_image_path = result[0]
    print("generated_image_path: " + generated_image_path)
    generated_image = Image.open(generated_image_path)

    return generated_image

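# Note: gradio_client downloads file outputs from the remote Space into a local
# temp directory, so result[0] above is a local file path that PIL can open.
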
# Custom CSS for better styling
custom_css = """
.gradio-container {
    max-width: 1200px !important;
}
.container {
    max-width: 1200px;
    margin: auto;
    padding: 20px;
}
.header {
    text-align: center;
    margin-bottom: 30px;
}
.header h1 {
    font-size: 2.5rem;
    margin-bottom: 10px;
    background: linear-gradient(45deg, #FF6B6B, #4ECDC4);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
}
.header p {
    font-size: 1.1rem;
    color: #666;
}
.image-container {
    border-radius: 10px;
    overflow: hidden;
    box-shadow: 0 4px 8px rgba(0,0,0,0.1);
}
.upload-section {
    background: #f9f9f9;
    padding: 20px;
    border-radius: 10px;
    margin-bottom: 20px;
}
.try-btn {
    background: linear-gradient(45deg, #FF6B6B, #4ECDC4) !important;
    color: white !important;
    font-weight: bold !important;
    padding: 12px 24px !important;
    border-radius: 50px !important;
    border: none !important;
}
.try-btn:hover {
    transform: translateY(-2px);
    box-shadow: 0 6px 12px rgba(0,0,0,0.15);
}
.examples-section {
    margin-top: 15px;
}
.examples-section h3 {
    margin-bottom: 10px;
    color: #555;
}
"""

with gr.Blocks(css=custom_css, title="Virtual Try-On Fashion") as demo:
    with gr.Column(elem_classes=["container"]):
        with gr.Column(elem_classes=["header"]):
            gr.HTML("""
                <h1>Virtual Try-On Fashion</h1>
                <p>Upload your photo and select a garment to see how it looks on you! ✨</p>
            """)

        with gr.Row():
            with gr.Column(elem_classes=["upload-section"]):
                gr.Markdown("### Step 1: Upload Your Photo")
                human_img = gr.Image(
                    type="filepath",
                    label='Person Image',
                    interactive=True,
                    elem_classes=["image-container"]
                )
                with gr.Column(elem_classes=["examples-section"]):
                    gr.Markdown("**Example poses:**")
                    example = gr.Examples(
                        inputs=human_img,
                        examples_per_page=5,
                        examples=human_list_path,
                        label=None
                    )

            with gr.Column(elem_classes=["upload-section"]):
                gr.Markdown("### Step 2: Select Garment")
                garm_img = gr.Image(
                    label="Clothing Item",
                    type="filepath",
                    interactive=True,
                    elem_classes=["image-container"]
                )
                with gr.Column(elem_classes=["examples-section"]):
                    gr.Markdown("**Example garments:**")
                    example = gr.Examples(
                        inputs=garm_img,
                        examples_per_page=5,
                        examples=garm_list_path,
                        label=None
                    )

            with gr.Column():
                gr.Markdown("### Step 3: See the Result")
                image_out = gr.Image(
                    label="Virtual Try-On Result",
                    type="pil",
                    elem_classes=["image-container"],
                    interactive=False
                )
                with gr.Row():
                    gr.ClearButton([human_img, garm_img, image_out])

        with gr.Row():
            try_button = gr.Button(
                value="Try It On Now",
                variant='primary',
                elem_classes=["try-btn"]
            )

        # Add some information sections
        with gr.Accordion("ℹ️ How to use this tool", open=False):
            gr.Markdown("""
            1. **Upload your photo**: Choose a clear front-facing photo with visible shoulders and hips
            2. **Select a garment**: Pick from our examples or upload your own clothing image
            3. **Click 'Try It On Now'**: See how the clothing looks on you instantly!

            For best results:
            - Use well-lit photos with good contrast
            - Avoid baggy clothing in your reference photo
            - Front-facing poses work best
            """)

        with gr.Accordion("⚠️ Limitations", open=False):
            gr.Markdown("""
            - Works best with upper body garments (shirts, jackets)
            - May not work perfectly with complex patterns or textures
            - Results depend on pose detection accuracy
            - Currently optimized for front-facing poses
            """)

    # Linking the button to the processing function
    try_button.click(
        fn=process_image,
        inputs=[human_img, garm_img],
        outputs=image_out,
        api_name="try_on"
    )

if __name__ == "__main__":
    demo.launch(show_error=True, share=False)
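Since try_button.click registers api_name="try_on", the finished Space exposes a /try_on endpoint of its own. A minimal client sketch, assuming a hypothetical Space id (replace with the real one):

    from gradio_client import Client, handle_file

    client = Client("user/fashion-virtual-tryon")  # hypothetical Space id
    result = client.predict(
        handle_file("example/human/human01.jpg"),   # person photo
        handle_file("example/cloth/cloth02.jpg"),   # garment image
        api_name="/try_on"
    )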
example/cloth/cloth02.jpg
ADDED
example/cloth/cloth_09133_00.jpg
ADDED
example/human/human01.jpg
ADDED (stored with Git LFS)
example/human/human02.jpg
ADDED (stored with Git LFS)
requirements.txt
ADDED
@@ -0,0 +1,7 @@
mediapipe>=0.10.8
numpy==1.26.4
opencv-contrib-python==4.11.0.86
opencv-python==4.11.0.86
gradio==5.23.3
gradio_client==1.8.0
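Note: the gradio==5.23.3 pin matches the sdk_version declared in README.md, and gradio_client is what process_image uses to call the remote Leffa Space; install locally with `pip install -r requirements.txt`.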