examples

Files changed:
- .gitattributes +1 -0
- ComfyUI/comfyui_screenshot.png +0 -0
- README.md +1 -1
- app.py +44 -24
- examples/bg.png +3 -0
- examples/cat.png +3 -0
- examples/julien.png +3 -0
- examples/lecun.png +3 -0
- examples/old_jump.png +3 -0
- utils.py +56 -19
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
ComfyUI/comfyui_screenshot.png
CHANGED
(binary file tracked with Git LFS)
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Layerdiffusion Gradio Unofficial
-emoji:
+emoji: 🍰
 colorFrom: pink
 colorTo: blue
 sdk: gradio
app.py
CHANGED
@@ -12,6 +12,7 @@ from utils import (
     postprocess_image,
     preprocess_image,
     downloadModels,
+    examples,
 )

 sys.path.append(os.path.dirname("./ComfyUI/"))
@@ -42,7 +43,9 @@ downloadModels()

 with torch.inference_mode():
     ckpt_load_checkpoint = CheckpointLoaderSimple().load_checkpoint
-    ckpt = ckpt_load_checkpoint(
+    ckpt = ckpt_load_checkpoint(
+        ckpt_name="juggernautXL_version6Rundiffusion.safetensors"
+    )

     cliptextencode = CLIPTextEncode().encode
     emptylatentimage_generate = EmptyLatentImage().generate
@@ -72,6 +75,7 @@ def predict(
     cfg: float,
     denoise: float,
 ):
+    seed = seed if seed != -1 else np.random.randint(0, 2**63 - 1)
     try:
         with torch.inference_mode():
             cliptextencode_prompt = cliptextencode(
@@ -139,7 +143,7 @@ def predict(
             )

             rgb_img = tensor_to_pil(vaedecode_sample[0])
-            return flatten([rgb_img])
+            return flatten([rgb_img]), seed
         else:
             layereddiffusionapply_sample = ld_fg_apply_layered_diffusion(
                 config="SDXL, Conv Injection", weight=1, model=ckpt[0]
@@ -177,28 +181,37 @@ def predict(
             mask = tensor_to_pil(mask[0])
             rgb_img = tensor_to_pil(vaedecode_sample[0])

-            return flatten([rgba_img, mask])
+            return flatten([rgba_img, mask]), seed
             # return flatten([rgba_img, mask, rgb_img, ld_image])
     except Exception as e:
         raise gr.Error(e)


-examples = [["An old men sit on a chair looking at the sky"]]
-
-
 def flatten(l: List[List[any]]) -> List[any]:
     return [item for sublist in l for item in sublist]


-def predict_examples(
+def predict_examples(
+    prompt, negative_prompt, input_image=None, remove_bg=False, cond_mode=None
+):
     return predict(
-        prompt,
+        prompt,
+        negative_prompt,
+        input_image,
+        remove_bg,
+        cond_mode,
+        0,
+        "euler",
+        "normal",
+        20,
+        8.0,
+        1.0,
     )


 css = """
 .gradio-container{
-    max-width:
+    max-width: 85rem !important;
 }
 """
 with gr.Blocks(css=css) as blocks:
@@ -223,31 +236,38 @@ with gr.Blocks(css=css) as blocks:
                 label="Remove Background",
                 value=False,
             )
-            input_image = gr.Image(
+            input_image = gr.Image(
+                label="Input Image",
+                type="pil",
+            )
             with gr.Accordion(open=False, label="Advanced Options"):
-
-
-
-
-
-
-
-
+                with gr.Group():
+                    with gr.Row():
+                        seed = gr.Slider(
+                            label="Seed",
+                            value=-1,
+                            minimum=-1,
+                            maximum=0xFFFFFFFFFFFFFFFF,
+                            step=1,
+                        )
+                        curr_seed = gr.Number(
+                            value=-1, interactive=False, scale=0, label=" "
+                        )
                 sampler_name = gr.Dropdown(
                     choices=samplers.KSampler.SAMPLERS,
                     label="Sampler Name",
-                    value=
+                    value="dpmpp_2m_sde",
                 )
                 scheduler = gr.Dropdown(
                     choices=samplers.KSampler.SCHEDULERS,
                     label="Scheduler",
-                    value=
+                    value="karras",
                 )
                 steps = gr.Slider(
-                    label="Steps", value=20, minimum=1, maximum=
+                    label="Steps", value=20, minimum=1, maximum=50, step=1
                 )
                 cfg = gr.Number(
-                    label="CFG", value=
+                    label="CFG", value=5.0, minimum=0.0, maximum=100.0, step=0.1
                 )
                 denoise = gr.Number(
                     label="Denoise", value=1.0, minimum=0.0, maximum=1.0, step=0.01
@@ -269,12 +289,12 @@ with gr.Blocks(css=css) as blocks:
             cfg,
             denoise,
         ]
-        outputs = [gallery]
+        outputs = [gallery, curr_seed]

         gr.Examples(
            fn=predict_examples,
            examples=examples,
-            inputs=[prompt, negative_prompt],
+            inputs=[prompt, negative_prompt, input_image, remove_bg, cond_mode],
            outputs=outputs,
            cache_examples=False,
         )
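The new seed plumbing follows a common Gradio pattern: -1 on the slider means "randomize", and predict returns the resolved seed alongside the gallery so the read-only curr_seed field can display what was actually used. A minimal sketch of that pattern, separate from the app (run_pipeline is a hypothetical stand-in for the ComfyUI sampling calls in predict):

import numpy as np

def resolve_seed(seed: int) -> int:
    # -1 is the "randomize" sentinel; the bound matches np.random.randint's
    # exclusive upper limit for signed 64-bit integers.
    return seed if seed != -1 else int(np.random.randint(0, 2**63 - 1))

def run_pipeline(prompt: str, seed: int) -> list:
    # Hypothetical stand-in for the actual sampling calls.
    return [f"image for {prompt!r} @ seed {seed}"]

def generate(prompt: str, seed: int = -1):
    seed = resolve_seed(seed)
    images = run_pipeline(prompt, seed)
    # Returning the seed as a second output is what lets
    # outputs=[gallery, curr_seed] show the seed that was used.
    return images, seed

print(generate("a cat", -1))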
examples/bg.png
ADDED
(binary file tracked with Git LFS)

examples/cat.png
ADDED
(binary file tracked with Git LFS)

examples/julien.png
ADDED
(binary file tracked with Git LFS)

examples/lecun.png
ADDED
(binary file tracked with Git LFS)

examples/old_jump.png
ADDED
(binary file tracked with Git LFS)
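These PNGs are the image column of the examples table defined in utils.py below. In gr.Examples, each example row must line up positionally with the components in inputs; this commit grows both together, from two columns to five. A self-contained sketch of that contract (the components and function here are illustrative, not the app's):

import gradio as gr

def fn(prompt: str, flag: bool) -> str:
    return f"{prompt} ({flag})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    flag = gr.Checkbox(label="Flag")
    out = gr.Textbox(label="Out")
    # One value per input component, in order; file-path values are loaded
    # into the matching component (e.g. gr.Image) when a row is clicked.
    gr.Examples(
        fn=fn,
        examples=[["hello", True], ["world", False]],
        inputs=[prompt, flag],
        outputs=[out],
        cache_examples=False,  # clicking fills the inputs instead of replaying cached results
    )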
utils.py
CHANGED
@@ -20,25 +20,26 @@ def tensor_to_pil(images: torch.Tensor | List[torch.Tensor]) -> List[Image.Image]:
     return imgs


-def pad_image(input_image):
-
-
-
-
-    )
-    im_padded = Image.
-
-
-
-    if w == h:
+def pad_image(input_image, background_color=(0, 0, 0)):
+    w, h = input_image.size
+    pad_w = (64 - w % 64) % 64
+    pad_h = (64 - h % 64) % 64
+
+    new_size = (w + pad_w, h + pad_h)
+    im_padded = Image.new(input_image.mode, new_size, background_color)
+    im_padded.paste(input_image, (pad_w // 2, pad_h // 2))
+
+    if im_padded.size[0] == im_padded.size[1]:
         return im_padded
-    elif
-
-        new_image.
+    elif im_padded.size[0] > im_padded.size[1]:
+        new_size = (im_padded.size[0], im_padded.size[0])
+        new_image = Image.new(im_padded.mode, new_size, background_color)
+        new_image.paste(im_padded, (0, (new_size[1] - im_padded.size[1]) // 2))
         return new_image
     else:
-
-        new_image.
+        new_size = (im_padded.size[1], im_padded.size[1])
+        new_image = Image.new(im_padded.mode, new_size, background_color)
+        new_image.paste(im_padded, ((new_size[0] - im_padded.size[0]) // 2, 0))
         return new_image
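The rewrite makes pad_image two-stage: pad each side up to the next multiple of 64 (the usual latent-grid constraint for SDXL-style VAEs), then letterbox the result onto a square canvas. A quick sanity check of the geometry, assuming the function above is importable from utils:

from PIL import Image
from utils import pad_image

img = Image.new("RGB", (500, 300))
padded = pad_image(img)
# 500 -> 512 and 300 -> 320 after multiple-of-64 padding, then the
# wider 512x320 canvas is centered vertically on a 512x512 square.
assert padded.size == (512, 512)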
@@ -95,10 +96,14 @@ def postprocess_image(result: torch.Tensor, im_size: list) -> np.ndarray:

 def downloadModels():
     MODEL_PATH = hf_hub_download(
-        repo_id="lllyasviel/fav_models",
-        subfolder="fav",
-        filename="juggernautXL_v8Rundiffusion.safetensors",
+        repo_id="RunDiffusion/Juggernaut-XL-v6",
+        filename="juggernautXL_version6Rundiffusion.safetensors",
     )
+    # MODEL_PATH = hf_hub_download(
+    #     repo_id="lllyasviel/fav_models",
+    #     subfolder="fav",
+    #     filename="juggernautXL_v8Rundiffusion.safetensors",
+    # )
     LAYERS_PATH = snapshot_download(
         repo_id="LayerDiffusion/layerdiffusion-v1", allow_patterns="*.safetensors"
     )
@@ -112,3 +117,35 @@ def downloadModels():
     )
     if not model_target_path.exists():
         os.symlink(MODEL_PATH, model_target_path)
+
+
+examples = [
+    [
+        "An old men sit on a chair looking at the sky",
+        "ugly distorted image, low quality, text, bad, not good ,watermark",
+        None,
+        False,
+        None,
+    ],
+    [
+        "A beautiful toucan bird flying in the sky",
+        "ugly distorted image, low quality, text, bad, not good ,watermark",
+        "./examples/bg.png",
+        False,
+        "SDXL, Background",
+    ],
+    [
+        "A men watching a concert",
+        "ugly distorted image, low quality, text, bad, not good ,watermark",
+        "./examples/lecun.png",
+        True,
+        "SDXL, Foreground",
+    ],
+    [
+        "A men watching a concert",
+        "ugly distorted image, low quality, text, bad, not good ,watermark",
+        "./examples/julien.png",
+        True,
+        "SDXL, Foreground",
+    ],
+]
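downloadModels combines the two huggingface_hub download primitives: hf_hub_download for a single checkpoint file, and snapshot_download with a glob filter for the LayerDiffusion weights, then symlinks the cached files into ComfyUI's model directories instead of copying them. A minimal sketch of the same pattern (the target paths here are assumptions, not necessarily the app's exact layout):

import os
from pathlib import Path
from huggingface_hub import hf_hub_download, snapshot_download

def fetch_models(comfy_root: str = "./ComfyUI") -> None:
    # Single file: returns the path of the checkpoint in the local HF cache.
    model_path = hf_hub_download(
        repo_id="RunDiffusion/Juggernaut-XL-v6",
        filename="juggernautXL_version6Rundiffusion.safetensors",
    )
    # Whole-repo snapshot, restricted to *.safetensors files; returns the
    # snapshot directory containing the layer weights.
    layers_dir = snapshot_download(
        repo_id="LayerDiffusion/layerdiffusion-v1", allow_patterns="*.safetensors"
    )
    print("layer weights in:", layers_dir)
    # Symlink into the folder ComfyUI scans for checkpoints; a link avoids
    # duplicating a multi-GB file on disk.
    target = Path(comfy_root) / "models" / "checkpoints" / Path(model_path).name
    target.parent.mkdir(parents=True, exist_ok=True)
    if not target.exists():
        os.symlink(model_path, target)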