Keshabwi66 committed (verified)
Commit eb634d0 · 1 Parent(s): 64dc7d0

Update app.py

Files changed (1):
  1. app.py (+20 -15)
app.py CHANGED
@@ -1,14 +1,14 @@
  import sys
+ import torch
  import os
-
+ import spaces
+ import gradio as gr
+ from PIL import Image
  sys.path.append('./')
  os.system("pip install gradio accelerate==0.25.0 torchmetrics==1.2.1 tqdm==4.66.1 fastapi==0.111.0 transformers==4.36.2 diffusers==0.25 einops==0.7.0 bitsandbytes scipy==1.11.1 opencv-python gradio==4.24.0 fvcore cloudpickle omegaconf pycocotools basicsr av onnxruntime==1.16.2 peft==0.11.1 huggingface_hub==0.24.7 --no-deps")
- import spaces
  from fastapi import FastAPI
  app = FastAPI()

- from PIL import Image
- import gradio as gr
  from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
  from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
  from src.unet_hacked_tryon import UNet2DConditionModel
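Reviewer note on this hunk: torch, spaces, gradio, and PIL are now imported before the os.system("pip install ...") line, so they must already be importable in the Space's base environment; if any of them were only provided by that install, the app would fail at import time. A minimal sketch of the install-then-import ordering, assuming the same pinned package list (abbreviated here) and using subprocess so a failed install is not silently ignored the way os.system allows:

import subprocess
import sys

# Sketch only, not part of the commit. Reuse the full pinned list from app.py;
# check_call raises on a non-zero pip exit code instead of ignoring it.
subprocess.check_call([
    sys.executable, "-m", "pip", "install", "--no-deps",
    "accelerate==0.25.0", "transformers==4.36.2", "diffusers==0.25",
    "gradio==4.24.0", "peft==0.11.1", "huggingface_hub==0.24.7",
])

import torch        # safe to import once the pinned environment is in place
import gradio as gr
from PIL import Image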
@@ -20,15 +20,13 @@ from transformers import (
  )
  from diffusers import DDPMScheduler,AutoencoderKL
  from typing import List
-
- import torch
- import os
  from transformers import AutoTokenizer
  import numpy as np
+ from utils_mask import get_mask_location
  from torchvision import transforms
-
-
- device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
+ from preprocess.humanparsing.run_parsing import Parsing
+ from preprocess.openpose.run_openpose import OpenPose
+ from torchvision.transforms.functional import to_pil_image

  def pil_to_binary_mask(pil_image, threshold=0):
      np_image = np.array(pil_image)
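These imports wire in the automatic upper-body masking that replaces the uploaded mask: OpenPose supplies body keypoints, Parsing supplies a human-parsing map, and get_mask_location combines the two into a garment-region mask (to_pil_image is imported but not used in the hunks shown). A rough sketch of how the pieces are expected to compose, relying on the module-level parsing_model / openpose_model defined further down and hedging on the return type of get_mask_location, which in the reference IDM-VTON code is a (mask, mask_gray) pair:

from PIL import Image

def auto_upper_body_mask(human_img: Image.Image) -> Image.Image:
    # Sketch only; call signatures assumed from how app.py uses these objects.
    small = human_img.resize((384, 512))
    keypoints = openpose_model(small)          # body keypoints
    model_parse, _ = parsing_model(small)      # human-parsing segmentation
    result = get_mask_location('hd', "upper_body", model_parse, keypoints)
    mask = result[0] if isinstance(result, tuple) else result  # hedge on return type
    return mask.resize((768, 1024))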
@@ -93,7 +91,8 @@ UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(
      torch_dtype=torch.float16,
  )

-
+ parsing_model = Parsing(0)
+ openpose_model = OpenPose(0)

  UNet_Encoder.requires_grad_(False)
  image_encoder.requires_grad_(False)
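The two preprocessors are constructed once at import time; the 0 argument is the GPU index handed to each wrapper, and the OpenPose network is only moved onto the GPU later, inside the @spaces.GPU-decorated handler (the .to(device) call added in the next hunk). A minimal sketch of that two-phase pattern, assuming openpose_model.preprocessor.body_estimation.model is an ordinary torch.nn.Module as the later hunk implies; run_preprocessing is a hypothetical name used only for illustration:

# Sketch: build on import (CPU), move to GPU only inside the request handler,
# which is where a ZeroGPU Space actually has a GPU attached.
parsing_model = Parsing(0)       # 0 = GPU index forwarded to the wrapper
openpose_model = OpenPose(0)

@spaces.GPU
def run_preprocessing(human_img):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    openpose_model.preprocessor.body_estimation.model.to(device)
    return openpose_model(human_img.resize((384, 512)))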
@@ -124,15 +123,23 @@ pipe = TryonPipeline.from_pretrained(
  pipe.unet_encoder = UNet_Encoder

  @spaces.GPU
- def start_tryon(person_img, mask_img, cloth_img, garment_des, denoise_steps=10, seed=42):
+ def start_tryon(person_img, cloth_img, garment_des, denoise_steps=10, seed=42):
      # Assuming device is set up (e.g., "cuda" or "cpu")
+     device = "cuda"
+     openpose_model.preprocessor.body_estimation.model.to(device)
      pipe.to(device)
      pipe.unet_encoder.to(device)

      # Resize and prepare images
      garm_img = cloth_img.convert("RGB").resize((768, 1024))
      human_img = person_img.convert("RGB").resize((768, 1024))
-     mask = pil_to_binary_mask(mask_img.convert("RGB").resize((768, 1024)))
+     is_checked = True
+     if is_checked:
+         keypoints = openpose_model(human_img.resize((384, 512)))
+         model_parse, _ = parsing_model(human_img.resize((384, 512)))
+         mask = get_mask_location('hd', "upper_body", model_parse, keypoints)
+
+     mask = pil_to_binary_mask(mask.convert("RGB").resize((768, 1024)))
      pose_img=Image.open("00006_00.jpg")

      # Prepare pose image (already uploaded)
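Two things in this hunk deserve a flag. First, device is hard-coded to "cuda", which breaks any CPU-only fallback (the comment above it still talks about "cuda" or "cpu"). Second, if this repo's utils_mask follows the upstream IDM-VTON signature, get_mask_location returns a (mask, mask_gray) pair, in which case the bare assignment binds a tuple and the following mask.convert(...) raises; the sketch after the import hunk above shows a hedged way to handle either return type. A defensive variant of just the device handling, as an assumption-labelled sketch rather than the committed code:

# Sketch: pick the device defensively so the handler still runs when debugging
# on CPU; on a ZeroGPU Space "cuda" is available inside @spaces.GPU functions,
# so behaviour there is unchanged.
device = "cuda" if torch.cuda.is_available() else "cpu"
openpose_model.preprocessor.body_estimation.model.to(device)
pipe.to(device)
pipe.unet_encoder.to(device)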
@@ -215,8 +222,6 @@ with image_blocks as demo:
      with gr.Row():
          with gr.Column():
              person_img = gr.Image(label='Person Image', sources='upload', type="pil")
-             mask_img = gr.Image(label='Mask Image', sources='upload', type="pil")
-
          with gr.Column():
              cloth_img = gr.Image(label='Garment Image', sources='upload', type="pil")
              garment_des = gr.Textbox(placeholder="Description of garment ex) Short Sleeve Round Neck T-shirts", label="Garment Description")
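With the Mask Image upload gone, the Gradio wiring has to pass exactly the three remaining inputs that the new start_tryon signature expects (denoise_steps and seed keep their defaults). A sketch of the expected hookup; try_button and image_out are assumed component names taken from the reference IDM-VTON demo and do not appear in the hunks shown:

# Sketch; try_button and image_out are assumed names, not from this diff.
try_button.click(
    fn=start_tryon,
    inputs=[person_img, cloth_img, garment_des],
    outputs=[image_out],
)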
 