Spaces:
update composable adapter (#8)
by Adapter · opened

Files changed:
- app.py +95 -279
- requirements.txt +0 -1
app.py
CHANGED
@@ -18,14 +18,10 @@ from torch import autocast
 18   from ldm.inference_base import (DEFAULT_NEGATIVE_PROMPT, diffusion_inference, get_adapters, get_sd_models)
 19   from ldm.modules.extra_condition import api
 20   from ldm.modules.extra_condition.api import (ExtraCondition, get_adapter_feature, get_cond_model)
 21 - import numpy as np
 22 - from ldm.util import read_state_dict
 23
 24   torch.set_grad_enabled(False)
 25
 26 -
 27 - supported_cond = ['style', 'color', 'sketch', 'sketch', 'openpose', 'depth', 'canny']
 28 - draw_map = gr.Interface(lambda x: x, gr.Image(source="canvas"), gr.Image())
 29
 30   # download the checkpoints
 31   urls = {
@@ -34,32 +30,12 @@ urls = {
 34       'models/t2iadapter_openpose_sd14v1.pth', 'models/t2iadapter_seg_sd14v1.pth',
 35       'models/t2iadapter_sketch_sd14v1.pth', 'models/t2iadapter_depth_sd14v1.pth',
 36       'third-party-models/body_pose_model.pth', "models/t2iadapter_style_sd14v1.pth",
 37 -     "models/t2iadapter_canny_sd14v1.pth"
 38 -     "models/t2iadapter_canny_sd15v2.pth", "models/t2iadapter_depth_sd15v2.pth",
 39 -     "models/t2iadapter_sketch_sd15v2.pth"
 40   ],
 41   'runwayml/stable-diffusion-v1-5': ['v1-5-pruned-emaonly.ckpt'],
 42 - 'CompVis/stable-diffusion-v-1-4-original': ['sd-v1-4.ckpt'],
 43   'andite/anything-v4.0': ['anything-v4.0-pruned.ckpt', 'anything-v4.0.vae.pt'],
 44   }
 45
 46 - # download image samples
 47 - torch.hub.download_url_to_file(
 48 -     'https://user-images.githubusercontent.com/52127135/223114920-cae3e723-3683-424a-bebc-0875479f2409.jpg',
 49 -     'cyber_style.jpg')
 50 - torch.hub.download_url_to_file(
 51 -     'https://user-images.githubusercontent.com/52127135/223114946-6ccc127f-cb58-443e-8677-805f5dbaf6f1.png',
 52 -     'sword.png')
 53 - torch.hub.download_url_to_file(
 54 -     'https://user-images.githubusercontent.com/52127135/223121793-20c2ac6a-5a4f-4ff8-88ea-6d007a7959dd.png',
 55 -     'white.png')
 56 - torch.hub.download_url_to_file(
 57 -     'https://user-images.githubusercontent.com/52127135/223127404-4a3748cf-85a6-40f3-af31-a74e206db96e.jpeg',
 58 -     'scream_style.jpeg')
 59 - torch.hub.download_url_to_file(
 60 -     'https://user-images.githubusercontent.com/52127135/223127433-8768913f-9872-4d24-b883-a19a3eb20623.jpg',
 61 -     'motorcycle.jpg')
 62 -
 63   if os.path.exists('models') == False:
 64       os.mkdir('models')
 65   for repo in urls:
@@ -88,142 +64,99 @@ parser.add_argument(
 88   global_opt = parser.parse_args()
 89   global_opt.config = 'configs/stable-diffusion/sd-v1-inference.yaml'
 90   for cond_name in supported_cond:
 91 -
 92 -     setattr(global_opt, f'{cond_name}_adapter_ckpt', f'models/t2iadapter_{cond_name}_sd15v2.pth')
 93 - else:
 94 -     setattr(global_opt, f'{cond_name}_adapter_ckpt', f'models/t2iadapter_{cond_name}_sd14v1.pth')
 95   global_opt.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
 96   global_opt.max_resolution = 512 * 512
 97   global_opt.sampler = 'ddim'
 98   global_opt.cond_weight = 1.0
 99   global_opt.C = 4
100   global_opt.f = 8
101   # adapters and models to processing condition inputs
102   adapters = {}
103   cond_models = {}
104   torch.cuda.empty_cache()
105
106
107 - def
108 -
109 -
110 -
111 -     im1 = (im1.clip(0, 255)).astype(np.uint8)
112 -
113 -     return im1
114
115 -
116 -
117 -
118 -     # stable-diffusion model
119 -     self.sd_model, self.sampler = get_sd_models(global_opt)
120
121 -     def run(self, *args):
122           opt = copy.deepcopy(global_opt)
123 -         opt.prompt, opt.neg_prompt, opt.scale, opt.n_samples, opt.seed, opt.steps, opt.resize_short_edge, opt.cond_tau
124 -
125 -
126 -
127 -
128 -         pl_sd = read_state_dict(ckpt)
129 -         if "state_dict" in pl_sd:
130 -             pl_sd = pl_sd["state_dict"]
131 -         else:
132 -             pl_sd = pl_sd
133 -         self.sd_model.load_state_dict(pl_sd, strict=False)
134 -         del pl_sd
135 -         self.base_model = opt.base_model
136 -         if self.base_model!='v1-5-pruned-emaonly.ckpt' and self.base_model!='sd-v1-4.ckpt':
137 -             vae_sd = torch.load(os.path.join('models', 'anything-v4.0.vae.pt'), map_location="cuda")
138 -             st = vae_sd["state_dict"]
139 -             self.sd_model.first_stage_model.load_state_dict(st, strict=False)
140 -             del st
141 -
142 -         with torch.inference_mode(), \
143 -                 self.sd_model.ema_scope(), \
144 -                 autocast('cuda'):
145 -
146 -             inps = []
147 -             for i in range(0, len(args) - 9, len(supported_cond)):
148 -                 inps.append(args[i:i + len(supported_cond)])
149 -
150 -             conds = []
151 -             activated_conds = []
152 -
153 -             ims1 = []
154 -             ims2 = []
155 -             for idx, (b, im1, im2, cond_weight) in enumerate(zip(*inps)):
156 -                 if b != 'Nothing' and (im1 is not None or im2 is not None):
157 -                     if im1 is not None and isinstance(im1,dict):
158 -                         im1 = im1['mask']
159 -                         im1 = draw_transfer(im1)
160
161                       if im1 is not None:
162                           h, w, _ = im1.shape
163                       else:
164                           h, w, _ = im2.shape
165 -
166 -
167 -
168 -
169 -                     ims1.append(im1)
170 -                     ims2.append(im2)
171 -                     continue
172 -                 if b != 'Nothing':
173 -                     if im1 is not None and isinstance(im1,dict):
174 -                         im1 = im1['mask']
175 -                         im1 = draw_transfer(im1)
176 -                         im2 = im1
177 -                         cv2.imwrite('sketch.png', im1)
178 -                     if im1 is not None:
179 -                         im1 = cv2.resize(im1, (w, h), interpolation=cv2.INTER_CUBIC)
180 -                     if im2 is not None:
181 -                         im2 = cv2.resize(im2, (w, h), interpolation=cv2.INTER_CUBIC)
182                   ims1.append(im1)
183                   ims2.append(im2)
184 -
185 -
186 -
187 -
188 -
189 -
190               else:
191 -
192 -
193 -                 if cond_name in adapters:
194 -                     adapters[cond_name]['model'] = adapters[cond_name]['model'].to(opt.device)
195 -                 else:
196 -                     adapters[cond_name] = get_adapters(opt, getattr(ExtraCondition, cond_name))
197 -                 adapters[cond_name]['cond_weight'] = cond_weight
198
199 -
200
201 -
202 -
203 -
204 -
205 -
206 -
207 -                 conds.append(process_cond_module(opt, (255.-ims2[idx]).astype(np.uint8), cond_name, None))
208 -             else:
209 -                 conds.append(process_cond_module(opt, ims2[idx], cond_name, None))
210
211 -
212 -
213
214 -
215 -
216 -
217
218 -
219 -
220 -
221 -
222 -
223
224 -
225 -
226 -
227
228
229   def change_visible(im1, im2, val):
@@ -239,14 +172,13 @@ def change_visible(im1, im2, val):
239       outputs[im2] = gr.update(visible=True)
240       return outputs
241
242 -
243
244   DESCRIPTION += f'<p>Gradio demo for **T2I-Adapter**: [[GitHub]](https://github.com/TencentARC/T2I-Adapter), [[Paper]](https://arxiv.org/abs/2302.08453). If T2I-Adapter is helpful, please help to ⭐ the [Github Repo](https://github.com/TencentARC/T2I-Adapter) and recommend it to your friends 😊 </p>'
245
246   DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/Adapter/T2I-Adapter?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
247
248 - processer = process()
249 -
250   with gr.Blocks(css='style.css') as demo:
251       gr.Markdown(DESCRIPTION)
252
@@ -260,7 +192,7 @@ with gr.Blocks(css='style.css') as demo:
260           with gr.Box():
261               gr.Markdown("<h5><center>Style & Color</center></h5>")
262               with gr.Row():
263 -                 for cond_name in
264                       with gr.Box():
265                           with gr.Column():
266                               if cond_name == 'style':
@@ -277,7 +209,6 @@ with gr.Blocks(css='style.css') as demo:
277                               interactive=True,
278                               value="Nothing",
279                           )
280 -
281                           im1 = gr.Image(
282                               source='upload', label="Image", interactive=True, visible=False, type="numpy")
283                           im2 = gr.Image(
@@ -297,38 +228,11 @@ with gr.Blocks(css='style.css') as demo:
297                           ims1.append(im1)
298                           ims2.append(im2)
299                           cond_weights.append(cond_weight)
300 -
301 -         with gr.Box():
302 -             gr.Markdown("<h5><center>Drawing</center></h5>")
303 -             with gr.Column():
304 -                 btn1 = gr.Radio(
305 -                     choices=["Sketch", "Nothing"],
306 -                     label=f"Input type for drawing",
307 -                     interactive=True,
308 -                     value="Nothing")
309 -                 im1 = gr.Image(source='canvas', tool='color-sketch', label='Pay attention to adjusting stylus thickness!', visible=False)
310 -                 im2 = im1
311 -                 cond_weight = gr.Slider(
312 -                     label="Condition weight",
313 -                     minimum=0,
314 -                     maximum=5,
315 -                     step=0.05,
316 -                     value=1,
317 -                     interactive=True)
318 -
319 -                 fn = partial(change_visible, im1, im2)
320 -                 btn1.change(fn=fn, inputs=[btn1], outputs=[im1, im2], queue=False)
321 -
322 -                 btns.append(btn1)
323 -                 ims1.append(im1)
324 -                 ims2.append(im2)
325 -                 cond_weights.append(cond_weight)
326 -
327           with gr.Column(scale=4):
328               with gr.Box():
329                   gr.Markdown("<h5><center>Structure</center></h5>")
330                   with gr.Row():
331 -                     for cond_name in
332                           with gr.Box():
333                               with gr.Column():
334                                   if cond_name == 'openpose':
@@ -345,7 +249,6 @@ with gr.Blocks(css='style.css') as demo:
345                                   interactive=True,
346                                   value="Nothing",
347                               )
348 -
349                               im1 = gr.Image(
350                                   source='upload', label="Image", interactive=True, visible=False, type="numpy")
351                               im2 = gr.Image(
@@ -360,124 +263,37 @@ with gr.Blocks(css='style.css') as demo:
360
361                               fn = partial(change_visible, im1, im2)
362                               btn1.change(fn=fn, inputs=[btn1], outputs=[im1, im2], queue=False)
363                               btns.append(btn1)
364                               ims1.append(im1)
365                               ims2.append(im2)
366                               cond_weights.append(cond_weight)
367
368 -
369 -
370 -
371 -
372 -
373 -
374 -
375 -
376 -
377 -
378 -
379 -
380 -
381 -
382 -
383 -
384 -
385 -     submit = gr.Button("Generate")
386 -
387 -     with gr.Box():
388 -         gr.Markdown("<h5><center>Results</center></h5>")
389 -         with gr.Column():
390 -             output = gr.Gallery().style(grid=2, height='auto')
391 -             cond = gr.Gallery().style(grid=2, height='auto')
392
393 -
394
395 -     inps
396 -     submit.click(fn=processer.run, inputs=inps, outputs=[output, cond])
397 -
398 -     ex = gr.Examples([
399 -         [
400 -             "Image",
401 -             "Nothing",
402 -             "Nothing",
403 -             "Image",
404 -             "Nothing",
405 -             "Nothing",
406 -             "Nothing",
407 -             "cyber_style.jpg",
408 -             "white.png",
409 -             "white.png",
410 -             "sword.png",
411 -             "white.png",
412 -             "white.png",
413 -             "white.png",
414 -             "white.png",
415 -             "white.png",
416 -             "white.png",
417 -             "white.png",
418 -             "white.png",
419 -             "white.png",
420 -             "white.png",
421 -             1,
422 -             1,
423 -             1,
424 -             1,
425 -             1,
426 -             1,
427 -             1,
428 -             "master sword",
429 -             "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
430 -             7.5,
431 -             1,
432 -             2500,
433 -             50,
434 -             512,
435 -             1,
436 -             "v1-5-pruned-emaonly.ckpt",
437 -         ],
438 -         [
439 -             "Image",
440 -             "Nothing",
441 -             "Nothing",
442 -             "Image",
443 -             "Nothing",
444 -             "Nothing",
445 -             "Nothing",
446 -             "scream_style.jpeg",
447 -             "white.png",
448 -             "white.png",
449 -             "motorcycle.jpg",
450 -             "white.png",
451 -             "white.png",
452 -             "white.png",
453 -             "white.png",
454 -             "white.png",
455 -             "white.png",
456 -             "white.png",
457 -             "white.png",
458 -             "white.png",
459 -             "white.png",
460 -             1,
461 -             1,
462 -             1,
463 -             1,
464 -             1,
465 -             1,
466 -             1,
467 -             "motorcycle",
468 -             "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
469 -             7.5,
470 -             1,
471 -             2500,
472 -             50,
473 -             512,
474 -             1,
475 -             "v1-5-pruned-emaonly.ckpt",
476 -         ],
477 -     ],
478 -     fn=processer.run,
479 -     inputs=inps,
480 -     outputs=[output, cond],
481 -     cache_examples=True)
482
483   demo.queue().launch(debug=True, server_name='0.0.0.0')
 18   from ldm.inference_base import (DEFAULT_NEGATIVE_PROMPT, diffusion_inference, get_adapters, get_sd_models)
 19   from ldm.modules.extra_condition import api
 20   from ldm.modules.extra_condition.api import (ExtraCondition, get_adapter_feature, get_cond_model)
 21
 22   torch.set_grad_enabled(False)
 23
 24 + supported_cond = ['style', 'color', 'canny', 'sketch', 'openpose', 'depth']
 25
 26   # download the checkpoints
 27   urls = {
 30       'models/t2iadapter_openpose_sd14v1.pth', 'models/t2iadapter_seg_sd14v1.pth',
 31       'models/t2iadapter_sketch_sd14v1.pth', 'models/t2iadapter_depth_sd14v1.pth',
 32       'third-party-models/body_pose_model.pth', "models/t2iadapter_style_sd14v1.pth",
 33 +     "models/t2iadapter_canny_sd14v1.pth"
 34   ],
 35   'runwayml/stable-diffusion-v1-5': ['v1-5-pruned-emaonly.ckpt'],
 36   'andite/anything-v4.0': ['anything-v4.0-pruned.ckpt', 'anything-v4.0.vae.pt'],
 37   }
 38
 39   if os.path.exists('models') == False:
 40       os.mkdir('models')
 41   for repo in urls:
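The body of the checkpoint download loop sits outside the changed hunks, so it is not shown above. Purely as an assumption about what such a loop might do, a minimal standalone sketch (hypothetical; it uses huggingface_hub, which typically ships alongside diffusers, and a placeholder repo name):

import os
import shutil
from huggingface_hub import hf_hub_download  # assumption: available in the Space environment

urls = {'some-org/some-repo': ['models/t2iadapter_style_sd14v1.pth']}  # placeholder with the same shape as the real dict

os.makedirs('models', exist_ok=True)
for repo in urls:
    for filename in urls[repo]:
        local_path = os.path.join('models', os.path.basename(filename))
        if not os.path.exists(local_path):
            cached = hf_hub_download(repo_id=repo, filename=filename)  # download (or reuse the HF cache)
            shutil.copy(cached, local_path)                            # copy into ./models where app.py expects it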
| 64 |
global_opt = parser.parse_args()
|
| 65 |
global_opt.config = 'configs/stable-diffusion/sd-v1-inference.yaml'
|
| 66 |
for cond_name in supported_cond:
|
| 67 |
+
setattr(global_opt, f'{cond_name}_adapter_ckpt', f'models/t2iadapter_{cond_name}_sd14v1.pth')
|
|
|
|
|
|
|
|
|
|
| 68 |
global_opt.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 69 |
global_opt.max_resolution = 512 * 512
|
| 70 |
global_opt.sampler = 'ddim'
|
| 71 |
global_opt.cond_weight = 1.0
|
| 72 |
global_opt.C = 4
|
| 73 |
global_opt.f = 8
|
| 74 |
+
|
| 75 |
+
# stable-diffusion model
|
| 76 |
+
sd_model, sampler = get_sd_models(global_opt)
|
| 77 |
# adapters and models to processing condition inputs
|
| 78 |
adapters = {}
|
| 79 |
cond_models = {}
|
| 80 |
torch.cuda.empty_cache()
|
| 81 |
|
| 82 |
|
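Since the sd15v2 branch is removed, every condition now resolves to an sd14v1 adapter checkpoint. A standalone sanity sketch of what the loop at lines 66-67 produces (the `Opt` stub below stands in for the argparse namespace and is purely illustrative):

# Illustrative only: the attribute names and paths produced by the setattr loop above.
supported_cond = ['style', 'color', 'canny', 'sketch', 'openpose', 'depth']

class Opt:  # stand-in for the parsed argparse namespace used in app.py
    pass

global_opt = Opt()
for cond_name in supported_cond:
    setattr(global_opt, f'{cond_name}_adapter_ckpt', f'models/t2iadapter_{cond_name}_sd14v1.pth')

print(global_opt.canny_adapter_ckpt)   # models/t2iadapter_canny_sd14v1.pth
print(global_opt.depth_adapter_ckpt)   # models/t2iadapter_depth_sd14v1.pth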
 83 + def run(*args):
 84 +     with torch.inference_mode(), \
 85 +             sd_model.ema_scope(), \
 86 +             autocast('cuda'):
 87
 88 +         inps = []
 89 +         for i in range(0, len(args) - 8, len(supported_cond)):
 90 +             inps.append(args[i:i + len(supported_cond)])
 91
 92           opt = copy.deepcopy(global_opt)
 93 +         opt.prompt, opt.neg_prompt, opt.scale, opt.n_samples, opt.seed, opt.steps, opt.resize_short_edge, opt.cond_tau \
 94 +             = args[-8:]
 95 +
 96 +         conds = []
 97 +         activated_conds = []
 98
 99 +         ims1 = []
100 +         ims2 = []
101 +         for idx, (b, im1, im2, cond_weight) in enumerate(zip(*inps)):
102 +             if idx > 1:
103 +                 if im1 is not None or im2 is not None:
104                       if im1 is not None:
105                           h, w, _ = im1.shape
106                       else:
107                           h, w, _ = im2.shape
108 +                     break
109 +         # resize all the images to the same size
110 +         for idx, (b, im1, im2, cond_weight) in enumerate(zip(*inps)):
111 +             if idx == 0:
112                   ims1.append(im1)
113                   ims2.append(im2)
114 +                 continue
115 +             if im1 is not None:
116 +                 im1 = cv2.resize(im1, (w, h), interpolation=cv2.INTER_CUBIC)
117 +             if im2 is not None:
118 +                 im2 = cv2.resize(im2, (w, h), interpolation=cv2.INTER_CUBIC)
119 +             ims1.append(im1)
120 +             ims2.append(im2)
121 +
122 +         for idx, (b, _, _, cond_weight) in enumerate(zip(*inps)):
123 +             cond_name = supported_cond[idx]
124 +             if b == 'Nothing':
125 +                 if cond_name in adapters:
126 +                     adapters[cond_name]['model'] = adapters[cond_name]['model'].cpu()
127 +             else:
128 +                 activated_conds.append(cond_name)
129 +                 if cond_name in adapters:
130 +                     adapters[cond_name]['model'] = adapters[cond_name]['model'].to(opt.device)
131                   else:
132 +                     adapters[cond_name] = get_adapters(opt, getattr(ExtraCondition, cond_name))
133 +                 adapters[cond_name]['cond_weight'] = cond_weight
134
135 +                 process_cond_module = getattr(api, f'get_cond_{cond_name}')
136
137 +                 if b == 'Image':
138 +                     if cond_name not in cond_models:
139 +                         cond_models[cond_name] = get_cond_model(opt, getattr(ExtraCondition, cond_name))
140 +                     conds.append(process_cond_module(opt, ims1[idx], 'image', cond_models[cond_name]))
141 +                 else:
142 +                     conds.append(process_cond_module(opt, ims2[idx], cond_name, None))
143
144 +         adapter_features, append_to_context = get_adapter_feature(
145 +             conds, [adapters[cond_name] for cond_name in activated_conds])
146
147 +         output_conds = []
148 +         for cond in conds:
149 +             output_conds.append(tensor2img(cond, rgb2bgr=False))
150
151 +         ims = []
152 +         seed_everything(opt.seed)
153 +         for _ in range(opt.n_samples):
154 +             result = diffusion_inference(opt, sd_model, sampler, adapter_features, append_to_context)
155 +             ims.append(tensor2img(result, rgb2bgr=False))
156
157 +         # Clear GPU memory cache so less likely to OOM
158 +         torch.cuda.empty_cache()
159 +         return ims, output_conds
160
161
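For review, a standalone sketch of the flat argument layout the new run() relies on: Gradio passes the components in the order the `inps` list is built further down (six radio values, six plus six images, six weights, then eight option values), so the chunking loop at lines 88-90 recovers per-condition tuples and `args[-8:]` recovers the options. Everything below uses dummy values and is not part of the app:

# Standalone illustration of the argument layout run() expects:
# 4 blocks of len(supported_cond) UI values, followed by 8 trailing option values.
supported_cond = ['style', 'color', 'canny', 'sketch', 'openpose', 'depth']

btns = ['Image', 'Nothing', 'Nothing', 'Sketch', 'Nothing', 'Nothing']
ims1 = [f'im1_{c}' for c in supported_cond]   # dummy stand-ins for gr.Image values
ims2 = [f'im2_{c}' for c in supported_cond]
cond_weights = [1.0] * len(supported_cond)
options = ['prompt', 'neg_prompt', 7.5, 1, 42, 50, 512, 1.0]

args = btns + ims1 + ims2 + cond_weights + options   # what Gradio passes positionally

inps = []
for i in range(0, len(args) - 8, len(supported_cond)):
    inps.append(args[i:i + len(supported_cond)])      # -> [btns, ims1, ims2, cond_weights]

# zip(*inps) regroups the four blocks into one tuple per condition:
for idx, (b, im1, im2, w) in enumerate(zip(*inps)):
    print(supported_cond[idx], b, im1, im2, w)

prompt, neg_prompt, scale, n_samples, seed, steps, resize_short_edge, cond_tau = args[-8:]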
|
| 162 |
def change_visible(im1, im2, val):
|
|
|
|
| 172 |
outputs[im2] = gr.update(visible=True)
|
| 173 |
return outputs
|
| 174 |
|
| 175 |
+
|
| 176 |
+
DESCRIPTION = '# [Composable T2I-Adapter](https://github.com/TencentARC/T2I-Adapter)'
|
| 177 |
|
| 178 |
DESCRIPTION += f'<p>Gradio demo for **T2I-Adapter**: [[GitHub]](https://github.com/TencentARC/T2I-Adapter), [[Paper]](https://arxiv.org/abs/2302.08453). If T2I-Adapter is helpful, please help to ⭐ the [Github Repo](https://github.com/TencentARC/T2I-Adapter) and recommend it to your friends 😊 </p>'
|
| 179 |
|
| 180 |
DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/Adapter/T2I-Adapter?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
|
| 181 |
|
|
|
|
|
|
|
| 182 |
with gr.Blocks(css='style.css') as demo:
|
| 183 |
gr.Markdown(DESCRIPTION)
|
| 184 |
|
|
|
|
| 192 |
with gr.Box():
|
| 193 |
gr.Markdown("<h5><center>Style & Color</center></h5>")
|
| 194 |
with gr.Row():
|
| 195 |
+
for cond_name in supported_cond[:2]:
|
| 196 |
with gr.Box():
|
| 197 |
with gr.Column():
|
| 198 |
if cond_name == 'style':
|
|
|
|
| 209 |
interactive=True,
|
| 210 |
value="Nothing",
|
| 211 |
)
|
|
|
|
| 212 |
im1 = gr.Image(
|
| 213 |
source='upload', label="Image", interactive=True, visible=False, type="numpy")
|
| 214 |
im2 = gr.Image(
|
|
|
|
| 228 |
ims1.append(im1)
|
| 229 |
ims2.append(im2)
|
| 230 |
cond_weights.append(cond_weight)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
231           with gr.Column(scale=4):
232               with gr.Box():
233                   gr.Markdown("<h5><center>Structure</center></h5>")
234                   with gr.Row():
235 +                     for cond_name in supported_cond[2:6]:
236                           with gr.Box():
237                               with gr.Column():
238                                   if cond_name == 'openpose':
249                                       interactive=True,
250                                       value="Nothing",
251                                   )
252                                   im1 = gr.Image(
253                                       source='upload', label="Image", interactive=True, visible=False, type="numpy")
254                                   im2 = gr.Image(
263
264                                   fn = partial(change_visible, im1, im2)
265                                   btn1.change(fn=fn, inputs=[btn1], outputs=[im1, im2], queue=False)
266 +
267                                   btns.append(btn1)
268                                   ims1.append(im1)
269                                   ims2.append(im2)
270                                   cond_weights.append(cond_weight)
271
272 +     with gr.Column():
273 +         prompt = gr.Textbox(label="Prompt")
274 +
275 +         with gr.Accordion('Advanced options', open=False):
276 +             neg_prompt = gr.Textbox(label="Negative Prompt", value=DEFAULT_NEGATIVE_PROMPT)
277 +             scale = gr.Slider(
278 +                 label="Guidance Scale (Classifier free guidance)", value=7.5, minimum=1, maximum=20, step=0.1)
279 +             n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=8, step=1)
280 +             seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=10000, step=1)
281 +             steps = gr.Slider(label="Steps", value=50, minimum=10, maximum=100, step=1)
282 +             resize_short_edge = gr.Slider(label="Image resolution", value=512, minimum=320, maximum=1024, step=1)
283 +             cond_tau = gr.Slider(
284 +                 label="timestamp parameter that determines until which step the adapter is applied",
285 +                 value=1.0,
286 +                 minimum=0.1,
287 +                 maximum=1.0,
288 +                 step=0.05)
289
290 +     with gr.Row():
291 +         submit = gr.Button("Generate")
292 +         output = gr.Gallery().style(grid=2, height='auto')
293 +         cond = gr.Gallery().style(grid=2, height='auto')
294
295 +     inps = list(chain(btns, ims1, ims2, cond_weights))
296
297 +     inps.extend([prompt, neg_prompt, scale, n_samples, seed, steps, resize_short_edge, cond_tau])
298 +     submit.click(fn=run, inputs=inps, outputs=[output, cond])
299   demo.queue().launch(debug=True, server_name='0.0.0.0')
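One design point worth calling out: the new run() keeps every adapter cached in the module-level `adapters` dict, but only the currently selected conditions stay on the GPU; deselected ones are parked on the CPU rather than rebuilt on the next click. A hypothetical standalone helper that isolates the same pattern (the function name, `build_adapter`, and its arguments are invented for illustration and are not part of the app):

# Sketch of the on-demand adapter placement used in run() above.
adapters = {}  # persists across calls, mirroring the module-level dict in app.py

def place_adapter(cond_name, selected, device, cond_weight, build_adapter):
    """Return the adapter entry for cond_name, or None if the condition is unused."""
    if not selected:
        if cond_name in adapters:
            # previously used: move it off the GPU instead of deleting it
            adapters[cond_name]['model'] = adapters[cond_name]['model'].cpu()
        return None
    if cond_name in adapters:
        # reuse the cached adapter and bring it back onto the GPU
        adapters[cond_name]['model'] = adapters[cond_name]['model'].to(device)
    else:
        # first use: build it once and keep it cached
        adapters[cond_name] = {'model': build_adapter(cond_name).to(device)}
    adapters[cond_name]['cond_weight'] = cond_weight
    return adapters[cond_name]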
requirements.txt
CHANGED
@@ -1,4 +1,3 @@
  1 - xformers==0.0.16
  2   transformers==4.19.2
  3   diffusers==0.11.1
  4   invisible_watermark==0.1.5

  1   transformers==4.19.2
  2   diffusers==0.11.1
  3   invisible_watermark==0.1.5