import einops
import numpy as np
import torch
from PIL import Image
import sys
import os
import yaml

# Location of the ControlNet-v1-1-nightly repository (provides share, cldm, ...).
CONTROL_NET_PATH = '/home/takuma/Documents/co/ControlNet-v1-1-nightly/'
# Location of the ControlNet v1.1 checkpoint (.pth) and config (.yaml) files.
CONTROL_NET_MODEL_PATH = '../../ControlNet-v1-1'
sys.path.append(CONTROL_NET_PATH)

from share import *
from pytorch_lightning import seed_everything
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
from diffusers.utils import load_image

test_prompt = "best quality, extremely detailed"
test_negative_prompt = "lowres, bad anatomy, worst quality, low quality"

@torch.no_grad()
def generate(prompt, n_prompt, seed, control, ddim_steps=20, eta=0.0, scale=9.0, H=512, W=512, strength=1.0, guess_mode=False):
    """Sample one image using the globally loaded `model` and `ddim_sampler`."""
    seed_everything(seed)

    cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt] * num_samples)]}

    if model.global_average_pooling:
        un_cond = {"c_concat": None, "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
    else:
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
    shape = (4, H // 8, W // 8)

    # In guess mode the control strength is decayed towards the deeper blocks.
    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
    # Draw the initial latent on the CPU with an explicitly seeded generator for reproducibility.
    latent = torch.randn((1,) + shape, device="cpu", generator=torch.Generator(device="cpu").manual_seed(seed)).cuda()
    samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                 shape, cond, x_T=latent,
                                                 verbose=False, eta=eta,
                                                 unconditional_guidance_scale=scale,
                                                 unconditional_conditioning=un_cond)
    x_samples = model.decode_first_stage(samples)
    # Map the decoded images from [-1, 1] to uint8 in (b, h, w, c) layout.
    x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

    return Image.fromarray(x_samples[0])

def control_images(control_image_folder, model_name):
    with open('./control_images.yaml', 'r') as f:
        d = yaml.safe_load(f)
    filenames = d[model_name]
    return [Image.open(f'{control_image_folder}/{fn}').convert("RGB") for fn in filenames]
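
# control_images.yaml is expected to map each model suffix to a list of control
# image filenames, as implied by the lookup above. A minimal sketch (filenames
# are hypothetical):
#
#   p_sd15_canny:
#     - control_canny.png
#   p_sd15s2_lineart_anime:
#     - control_lineart_anime.png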

if __name__ == '__main__':
    model_name = sys.argv[1]

    control_image_folder = './control_images/converted/'
    output_image_folder = './output_images/ref/'
    os.makedirs(output_image_folder, exist_ok=True)

    # The anime lineart model is paired with the Anything v3 base; all other models use SD 1.5.
    if model_name == 'p_sd15s2_lineart_anime':
        base_model_file = 'anything-v3-full.safetensors'
    else:
        base_model_file = 'v1-5-pruned.ckpt'

    num_samples = 1
    model = create_model(f'{CONTROL_NET_MODEL_PATH}/control_v11{model_name}.yaml').cpu()
    # Load the base Stable Diffusion weights first, then overlay the ControlNet weights.
    model.load_state_dict(load_state_dict(f'{CONTROL_NET_PATH}/models/{base_model_file}', location='cuda'), strict=False)
    model.load_state_dict(load_state_dict(f'{CONTROL_NET_MODEL_PATH}/control_v11{model_name}.pth', location='cuda'), strict=False)
    model = model.cuda()
    ddim_sampler = DDIMSampler(model)

    for i, control_image in enumerate(control_images(control_image_folder, model_name)):
        # Reverse the channel order (RGB -> BGR), scale to [0, 1], and batch as (b, c, h, w).
        control = np.array(control_image)[:, :, ::-1].copy()
        control = torch.from_numpy(control).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        for seed in range(4):
            image = generate(test_prompt, test_negative_prompt, seed=seed, control=control)
            image.save(f'{output_image_folder}output_{model_name}_{i}_{seed}.png')
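
# Example invocation (the suffix is hypothetical and must match an existing
# checkpoint pair control_v11<suffix>.yaml / control_v11<suffix>.pth):
#
#   python <this_script>.py p_sd15_canny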