update gradio
app.py CHANGED
@@ -56,7 +56,7 @@ add_sampling_metadata = True
 
 @spaces.GPU(duration=120)
 @torch.inference_mode()
-def edit(init_image, brush_canvas,
+def edit(brush_canvas,
     source_prompt, target_prompt,
     inversion_num_steps, denoise_num_steps,
     skip_step,
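The signature change works because Gradio's ImageEditor already carries the uploaded image inside its value, so the separate init_image input becomes redundant. A minimal sketch (shapes are illustrative; the keys follow Gradio's EditorValue dict) of what the handler now receives in brush_canvas:

    import numpy as np

    # Illustrative EditorValue: what a gr.ImageEditor passes to its handler
    # when the image type is numpy. The array sizes are made up for the sketch.
    brush_canvas = {
        "background": np.zeros((768, 1360, 4), dtype=np.uint8),  # uploaded image (RGBA)
        "layers": [np.zeros((768, 1360, 4), dtype=np.uint8)],    # one RGBA array per brush layer
        "composite": np.zeros((768, 1360, 4), dtype=np.uint8),   # background with layers flattened in
    }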
@@ -66,26 +66,20 @@ def edit(init_image, brush_canvas,
     device = "cuda" if torch.cuda.is_available() else "cpu"
     torch.cuda.empty_cache()
 
+    rgba_init_image = brush_canvas["background"]
+    init_image = rgba_init_image[:,:,:3]
     shape = init_image.shape
     height = shape[0] if shape[0] % 16 == 0 else shape[0] - shape[0] % 16
     width = shape[1] if shape[1] % 16 == 0 else shape[1] - shape[1] % 16
-
     init_image = init_image[:height, :width, :]
-
-
+    rgba_init_image = rgba_init_image[:height, :width, :]
+
+    rgba_mask = brush_canvas["layers"][0][:height, :width, :]
+    mask = rgba_mask[:,:,3]/255
+    mask = mask.astype(int)
 
-
-
-        mask = mask.astype(int)
-    else:
-        mask = np.any(init_image != brush_canvas, axis=-1)  # produces a 2D boolean array
-        mask = mask.astype(int)
-        mask_array = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8)
-        mask_array[:,:,0] = mask * 255  # R
-        mask_array[:,:,3] = mask * 128  # A (semi-transparent; 128 means 50% opacity)
-        mask_image = Image.fromarray(mask_array, 'RGBA')
-        original_image = Image.fromarray(np.concatenate((init_image, np.full((height, width, 1), 255, dtype=np.uint8)), axis=2), 'RGBA')
-        masked_image = Image.alpha_composite(original_image, mask_image)
+    rgba_mask[:,:,3] = rgba_mask[:,:,3]//2
+    masked_image = Image.alpha_composite(Image.fromarray(rgba_init_image, 'RGBA'), Image.fromarray(rgba_mask, 'RGBA'))
     mask = torch.from_numpy(mask).unsqueeze(0).unsqueeze(0).to(torch.bfloat16).to(device)
 
     init_image = encode(init_image, device).to(device)
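The old path diffed the canvas pixels against the original image to find painted regions; the new path reads the brush layer's alpha channel directly, which is simpler and independent of brush color. A self-contained sketch of that logic on synthetic arrays (image size and stroke position are made up):

    import numpy as np
    from PIL import Image

    # Synthetic stand-ins for brush_canvas["background"] and brush_canvas["layers"][0].
    rgba_init_image = np.random.randint(0, 256, (770, 1365, 4), dtype=np.uint8)
    rgba_mask = np.zeros_like(rgba_init_image)
    rgba_mask[100:200, 100:300, 3] = 255                 # a pretend brush stroke

    # Crop to multiples of 16, as in the diff (n - n % 16 == n // 16 * 16).
    height = rgba_init_image.shape[0] // 16 * 16
    width = rgba_init_image.shape[1] // 16 * 16
    rgba_init_image = rgba_init_image[:height, :width, :]
    rgba_mask = rgba_mask[:height, :width, :]

    # Painted pixels carry full alpha; normalize to a 0/1 integer mask.
    mask = (rgba_mask[:, :, 3] / 255).astype(int)

    # Halve the stroke's alpha so the preview shows a translucent overlay.
    rgba_mask[:, :, 3] //= 2
    masked_image = Image.alpha_composite(
        Image.fromarray(rgba_init_image, "RGBA"),
        Image.fromarray(rgba_mask, "RGBA"),
    )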
@@ -180,8 +174,8 @@ def create_demo(model_name: str):
 
     🔔🔔[<b>Important</b>] Editing steps:<br>
     1️⃣ Upload the image you want to edit (the resolution is expected to be less than 1360*768, or the GPU memory may not be enough). <br>
-    2️⃣
-    3️⃣ Fill in your
+    2️⃣ Fill in your source prompt and use the brush tool to draw your mask area. <br>
+    3️⃣ Fill in your target prompt, then adjust the hyperparameters. <br>
     4️⃣ Click the "Edit" button to generate your edited image! <br>
     """
     article = r"""
@@ -204,15 +198,12 @@ def create_demo(model_name: str):
     inversion_num_steps = gr.Slider(1, 50, 28, step=1, label="Number of inversion steps")
     target_prompt = gr.Textbox(label="Target Prompt", value='In a cluttered wooden cabin, a workbench holds a green neon sign that reads "I love iccv"' )
     denoise_num_steps = gr.Slider(1, 50, 28, step=1, label="Number of denoise steps")
-    init_image = gr.Image(label="Input Image", visible=True)
     brush_canvas = gr.ImageEditor(label="Brush Canvas",
                                   sources=('upload'),
-                                  brush=gr.Brush(
-                                      default_color="#000000"),
+                                  brush=gr.Brush(colors=["#ff0000"],color_mode='fixed'),
                                   interactive=True,
-                                  container=True,
                                   transforms=[],
-
+                                  container=True,
                                   format='png',scale=1)
 
     edit_btn = gr.Button("edit")
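The old black default_color brush is replaced with a fixed red one, which keeps strokes visible on dark images; since the mask extraction above reads only the alpha channel, the color is purely a visual aid. A hedged sketch of the component with the diff's parameters (anything omitted falls back to Gradio defaults):

    import gradio as gr

    brush_canvas = gr.ImageEditor(
        label="Brush Canvas",
        sources=("upload",),                                # upload only; no webcam/clipboard
        brush=gr.Brush(colors=["#ff0000"], color_mode="fixed"),
        interactive=True,
        transforms=[],                                      # no crop/transform handles
        container=True,
        format="png",
        scale=1,
    )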
@@ -220,9 +211,8 @@ def create_demo(model_name: str):
 
         with gr.Column():
             with gr.Accordion("Advanced Options", open=True):
-                # num_steps = gr.Slider(1, 30, 25, step=1, label="Number of steps")
 
-                skip_step = gr.Slider(0, 30, 4, step=1, label="Number of
+                skip_step = gr.Slider(0, 30, 4, step=1, label="Number of skip steps")
                 inversion_guidance = gr.Slider(1.0, 10.0, 1.5, step=0.1, label="inversion Guidance", interactive=not is_schnell)
                 denoise_guidance = gr.Slider(1.0, 10.0, 5.5, step=0.1, label="denoise Guidance", interactive=not is_schnell)
                 seed = gr.Textbox('0', label="Seed (-1 for random)", visible=True)
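The seed arrives as a string because it comes from a Textbox. A hypothetical helper (resolve_seed is not in the diff) showing how the "-1 for random" convention is presumably handled downstream:

    import random

    def resolve_seed(seed_text: str) -> int:
        # Hypothetical: map the sentinel -1 to a fresh random seed.
        seed = int(seed_text)
        return random.randint(0, 2**32 - 1) if seed == -1 else seed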
@@ -234,7 +224,7 @@ def create_demo(model_name: str):
             output_image = gr.Image(label="Generated Image")
         edit_btn.click(
             fn=edit,
-            inputs=[
+            inputs=[brush_canvas,
                 source_prompt, target_prompt,
                 inversion_num_steps, denoise_num_steps,
                 skip_step,
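Gradio matches the inputs list to the handler's parameters positionally, so brush_canvas moves to the front of the list in step with the new edit() signature. A minimal runnable sketch of the wiring under that assumption (the inputs beyond skip_step are elided in the diff and left out here; the edit body is a placeholder):

    import gradio as gr

    def edit(brush_canvas, source_prompt, target_prompt,
             inversion_num_steps, denoise_num_steps, skip_step):
        return brush_canvas["composite"]  # placeholder body for the sketch

    with gr.Blocks() as demo:
        brush_canvas = gr.ImageEditor(label="Brush Canvas")
        source_prompt = gr.Textbox(label="Source Prompt")
        target_prompt = gr.Textbox(label="Target Prompt")
        inversion_num_steps = gr.Slider(1, 50, 28, step=1)
        denoise_num_steps = gr.Slider(1, 50, 28, step=1)
        skip_step = gr.Slider(0, 30, 4, step=1)
        edit_btn = gr.Button("edit")
        output_image = gr.Image(label="Generated Image")
        edit_btn.click(
            fn=edit,
            inputs=[brush_canvas, source_prompt, target_prompt,
                    inversion_num_steps, denoise_num_steps, skip_step],
            outputs=[output_image],
        )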