from functools import partial
from typing import Callable, Optional

import torch
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler

from modified_stable_diffusion import ModifiedStableDiffusionPipeline

### credit to: https://github.com/cccntu/efficient-prompt-to-prompt


def backward_ddim(x_t, alpha_t, alpha_tm1, eps_xt):
    """DDIM update from noise to image (one denoising step)."""
    return (
        alpha_tm1**0.5
        * (
            (alpha_t**-0.5 - alpha_tm1**-0.5) * x_t
            + ((1 / alpha_tm1 - 1) ** 0.5 - (1 / alpha_t - 1) ** 0.5) * eps_xt
        )
        + x_t
    )


def forward_ddim(x_t, alpha_t, alpha_tp1, eps_xt):
    """DDIM update from image to noise: the same affine update as
    backward_ddim, stepping toward the higher noise level alpha_tp1 instead.
    """
    return backward_ddim(x_t, alpha_t, alpha_tp1, eps_xt)
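

# Sanity sketch (illustrative, not part of the pipeline): because the update
# is affine in x_t, a forward step followed by the matching backward step with
# the same eps_xt is the exact identity:
#
#   x_tp1 = forward_ddim(x_t, alpha_t, alpha_tp1, eps)
#   x_rec = backward_ddim(x_tp1, alpha_tp1, alpha_t, eps)   # x_rec == x_t
#
# DDIM inversion is therefore exact only insofar as the model predicts the
# same eps in both directions.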


class InversableStableDiffusionPipeline(ModifiedStableDiffusionPipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
        requires_safety_checker: bool = False,
    ):
        super().__init__(
            vae,
            text_encoder,
            tokenizer,
            unet,
            scheduler,
            safety_checker,
            feature_extractor,
            requires_safety_checker,
        )
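
        # DDIM inversion (image -> noise) reuses the denoising loop in
        # backward_diffusion, walking the timesteps in reverse order.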
self.forward_diffusion = partial(self.backward_diffusion, reverse_process=True)
self.count = 0

    def get_random_latents(self, latents=None, height=None, width=None, generator=None):
        # default to the model's native resolution (512 px for SD v1.x)
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        batch_size = 1
        device = self._execution_device
        num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size,
num_channels_latents,
height,
width,
self.text_encoder.dtype,
device,
generator,
latents,
)
return latents

    @torch.inference_mode()
def get_text_embedding(self, prompt):
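        # Note: returns the embedding for `prompt` only; callers doing
        # classifier-free guidance concatenate an unconditional embedding
        # themselves (see the usage sketch at the bottom of the file).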
text_input_ids = self.tokenizer(
prompt,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
return text_embeddings

    @torch.inference_mode()
def get_image_latents(self, image, sample=True, rng_generator=None):
encoding_dist = self.vae.encode(image).latent_dist
if sample:
encoding = encoding_dist.sample(generator=rng_generator)
else:
encoding = encoding_dist.mode()
        # 0.18215 is the SD v1 VAE scaling factor (vae.config.scaling_factor)
        latents = encoding * 0.18215
return latents
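
    # Illustrative round trip through the VAE (image tensor scaled to [-1, 1]):
    #   latents = pipe.get_image_latents(image)   # (B, 4, H/8, W/8)
    #   recon = pipe.decode_image(latents)        # (B, 3, H, W), approx. image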

    @torch.inference_mode()
def backward_diffusion(
self,
use_old_emb_i=25,
text_embeddings=None,
old_text_embeddings=None,
new_text_embeddings=None,
latents: Optional[torch.FloatTensor] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: Optional[int] = 1,
        reverse_process: bool = False,
**kwargs,
):
""" Generate image from text prompt and latents
"""
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
        prompt_to_prompt = (
            old_text_embeddings is not None and new_text_embeddings is not None
        )

        timesteps = reversed(timesteps_tensor) if reverse_process else timesteps_tensor
        for i, t in enumerate(self.progress_bar(timesteps)):
if prompt_to_prompt:
if i < use_old_emb_i:
text_embeddings = old_text_embeddings
else:
text_embeddings = new_text_embeddings
# expand the latents if we are doing classifier free guidance
latent_model_input = (
torch.cat([latents] * 2) if do_classifier_free_guidance else latents
)
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input, t, encoder_hidden_states=text_embeddings
).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (
noise_pred_text - noise_pred_uncond
)
prev_timestep = (
t
- self.scheduler.config.num_train_timesteps
// self.scheduler.num_inference_steps
)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
            # DDIM update; for inversion the two alphas are swapped below so
            # the same affine step walks from image latents back toward noise
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
if reverse_process:
alpha_prod_t, alpha_prod_t_prev = alpha_prod_t_prev, alpha_prod_t
latents = backward_ddim(
x_t=latents,
alpha_t=alpha_prod_t,
alpha_tm1=alpha_prod_t_prev,
eps_xt=noise_pred,
)
return latents

    @torch.inference_mode()
def decode_image(self, latents: torch.FloatTensor, **kwargs):
        # undo the VAE scaling factor applied in get_image_latents
        scaled_latents = 1 / 0.18215 * latents
image = [
self.vae.decode(scaled_latents[i : i + 1]).sample for i in range(len(latents))
]
image = torch.cat(image, dim=0)
return image

    @torch.inference_mode()
def torch_to_numpy(self, image):
        # map pixel values from [-1, 1] to [0, 1] and layout from NCHW to NHWC
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
return image
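

if __name__ == "__main__":
    # Usage sketch (illustrative; the model id, prompt, and step counts are
    # assumptions, and loading via `from_pretrained` assumes the parent
    # pipeline exposes the standard diffusers loading path).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model_id = "runwayml/stable-diffusion-v1-5"

    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
    pipe = InversableStableDiffusionPipeline.from_pretrained(
        model_id, scheduler=scheduler
    ).to(device)

    # classifier-free guidance expects [uncond, cond] stacked along the batch
    cond = pipe.get_text_embedding("an astronaut riding a horse")
    uncond = pipe.get_text_embedding("")

    init_latents = pipe.get_random_latents()

    # noise -> image latents (generation)
    image_latents = pipe.backward_diffusion(
        latents=init_latents,
        text_embeddings=torch.cat([uncond, cond]),
        guidance_scale=7.5,
        num_inference_steps=50,
    )

    # image latents -> noise (DDIM inversion, usually run without guidance)
    recovered_noise = pipe.forward_diffusion(
        latents=image_latents,
        text_embeddings=cond,
        guidance_scale=1.0,
        num_inference_steps=50,
    )

    image = pipe.torch_to_numpy(pipe.decode_image(image_latents))
    print(image.shape, recovered_noise.shape)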