Keshabwi66 committed (verified)
Commit 673ce17 · Parent(s): 724f9fb

Update app.py

Files changed (1): app.py (+24 −11)

app.py CHANGED
@@ -28,6 +28,9 @@ from torchvision import transforms
 from preprocess.humanparsing.run_parsing import Parsing
 from preprocess.openpose.run_openpose import OpenPose
 from torchvision.transforms.functional import to_pil_image
+import apply_net
+from detectron2.data.detection_utils import convert_PIL_to_numpy, _apply_exif_orientation
+
 
 def pil_to_binary_mask(pil_image, threshold=0):
     np_image = np.array(pil_image)
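Note: the two added imports support the DensePose preprocessing introduced further down — `apply_net` drives the detectron2 DensePose model, while `_apply_exif_orientation` and `convert_PIL_to_numpy` turn the uploaded PIL image into the BGR array that model expects.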
@@ -124,7 +127,7 @@ pipe = TryonPipeline.from_pretrained(
 pipe.unet_encoder = UNet_Encoder
 
 @spaces.GPU
-def start_tryon(person_img, cloth_img, garment_des, denoise_steps=10, seed=42):
+def start_tryon(boy, girl, person_img, cloth_img, garment_des, denoise_steps=10, seed=42):
     # Assuming device is set up (e.g., "cuda" or "cpu")
     device = "cuda"
     openpose_model.preprocessor.body_estimation.model.to(device)
@@ -133,7 +136,7 @@ def start_tryon(person_img, cloth_img, garment_des, denoise_steps=10, seed=42):
 
     # Resize and prepare images
     garm_img = cloth_img.convert("RGB").resize((768, 1024))
-    human_img = person_img.convert("RGB")
+    human_img = person_img.convert("RGB").resize((768, 1024))
 
     is_checked = True
     if is_checked:
@@ -142,19 +145,27 @@ def start_tryon(person_img, cloth_img, garment_des, denoise_steps=10, seed=42):
         mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
         mask = mask.resize((768, 1024))
 
-    #mask = pil_to_binary_mask(mask.convert("RGB").resize((768, 1024)))
-    pose_img = Image.open("00006_00.jpg")
+    # Run DensePose on the person image to build the pose conditioning
+    human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
+    human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
 
-    # Prepare pose image (already uploaded)
-    pose_img = pose_img.resize((768, 1024))
+    args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
+    # verbosity = getattr(args, "verbosity", None)
+    pose_img = args.func(args, human_img_arg)
+    pose_img = pose_img[:, :, ::-1]  # BGR -> RGB
+    pose_img = Image.fromarray(pose_img).resize((768, 1024))
 
-
-
+    if boy:
+        prompt = "A boy is wearing " + garment_des
+    elif girl:
+        prompt = "A girl is wearing " + garment_des
+    else:
+        prompt = "model is wearing " + garment_des  # fallback when neither box is ticked
     # Embedding generation for prompts
     with torch.no_grad():
         with torch.cuda.amp.autocast():
             # Generate text embeddings for garment description
-            prompt = f"model is wearing {garment_des}"
+            # prompt was already built from the boy/girl selection above
            negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
             with torch.inference_mode():
                 (
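Note: `args.func(args, human_img_arg)` dispatches to apply_net's `show` action, which runs the DensePose R-50 FPN model and returns the `dp_segm` visualization as a BGR numpy array; the slice `[:, :, ::-1]` flips it to RGB before it is resized into the 768x1024 conditioning image. The same steps wrapped as a reusable helper might look like the sketch below — the config and checkpoint paths are taken verbatim from the hunk above, while the helper name and docstring are assumptions:

    import apply_net
    from PIL import Image
    from detectron2.data.detection_utils import convert_PIL_to_numpy, _apply_exif_orientation

    def densepose_from_pil(human_img):
        """Run DensePose on a PIL person image; return a 768x1024 RGB PIL image."""
        img = _apply_exif_orientation(human_img.resize((384, 512)))
        img_bgr = convert_PIL_to_numpy(img, format="BGR")
        args = apply_net.create_argument_parser().parse_args((
            'show',
            './configs/densepose_rcnn_R_50_FPN_s1x.yaml',
            './ckpt/densepose/model_final_162be9.pkl',
            'dp_segm', '-v',
            '--opts', 'MODEL.DEVICE', 'cuda',
        ))
        seg_bgr = args.func(args, img_bgr)  # segmentation as a BGR numpy array
        return Image.fromarray(seg_bgr[:, :, ::-1]).resize((768, 1024))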
@@ -168,7 +179,7 @@ def start_tryon(person_img, cloth_img, garment_des, denoise_steps=10, seed=42):
                     do_classifier_free_guidance=True,
                     negative_prompt=negative_prompt,
                 )
-                prompt = "a photo of " + garment_des
+                prompt = "A photo of " + garment_des
                 negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
                 if not isinstance(prompt, List):
                     prompt = [prompt] * 1
@@ -203,6 +214,6 @@ def start_tryon(person_img, cloth_img, garment_des, denoise_steps=10, seed=42):
                     num_inference_steps=denoise_steps,
                     generator=generator,
                     strength=1.0,
+                    pose_img=pose_img_tensor.to(device, torch.float16),
                     text_embeds_cloth=prompt_embeds_cloth.to(device, torch.float16),
                     cloth=garm_tensor.to(device, torch.float16),
-                    pose_img = None,
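Note: `pose_img_tensor` does not appear anywhere else in this diff, so it has to be created from the `pose_img` PIL image before the `pipe(...)` call; the old `pose_img = None,` keyword also has to go, since supplying `pose_img` twice would raise a SyntaxError (duplicate keyword argument). A plausible definition — not part of the commit — assuming the pipeline expects a batched tensor normalized to [-1, 1] like its other float16 inputs:

    from torchvision import transforms

    # Assumed transform: PIL image -> CHW float tensor in [-1, 1]
    to_norm_tensor = transforms.Compose([
        transforms.ToTensor(),               # PIL -> [0, 1] tensor
        transforms.Normalize([0.5], [0.5]),  # [0, 1] -> [-1, 1]
    ])
    pose_img_tensor = to_norm_tensor(pose_img).unsqueeze(0)  # add batch dimension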
@@ -225,6 +236,9 @@ with image_blocks as demo:
     with gr.Row():
         with gr.Column():
             person_img = gr.Image(label='Person Image', sources='upload', type="pil")
+            boy = gr.Checkbox(label="Yes", info="Boy", value=True)
+            girl = gr.Checkbox(label="Yes", info="Girl", value=False)
+
         with gr.Column():
             cloth_img = gr.Image(label='Garment Image', sources='upload', type="pil")
             garment_des = gr.Textbox(placeholder="Description of garment, e.g. Short Sleeve Round Neck T-shirt", label="Garment Description")
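Note: two independent checkboxes let users tick both `boy` and `girl`, or neither (hence the fallback prompt earlier). A mutually exclusive `gr.Radio` would encode the same choice more safely; a hypothetical variant, where the `gender` name and label are assumptions and `start_tryon` would then take a single string argument:

    gender = gr.Radio(choices=["Boy", "Girl"], value="Boy", label="Subject")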
@@ -234,6 +248,6 @@ with image_blocks as demo:
         image_out = gr.Image(label="Output Image", elem_id="output-img", show_share_button=False)
 
     try_button = gr.Button(value="Try-on")
-    try_button.click(fn=start_tryon, inputs=[person_img, cloth_img, garment_des], outputs=[image_out], api_name='tryon')
+    try_button.click(fn=start_tryon, inputs=[boy, girl, person_img, cloth_img, garment_des], outputs=[image_out], api_name='tryon')
 
 image_blocks.launch()
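Note: because the click handler registers `api_name='tryon'`, the Space can also be driven programmatically. A hedged sketch using `gradio_client` — the Space id is a placeholder, and the positional arguments mirror the `inputs` list above:

    from gradio_client import Client, handle_file

    client = Client("Keshabwi66/<space-name>")  # placeholder Space id
    result = client.predict(
        True,                               # boy
        False,                              # girl
        handle_file("person.jpg"),          # person_img
        handle_file("shirt.jpg"),           # cloth_img
        "Short Sleeve Round Neck T-shirt",  # garment_des
        api_name="/tryon",
    )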