Diffusers Bot
committed on
Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- v0.27.0/README.md +0 -0
- v0.27.0/bit_diffusion.py +264 -0
- v0.27.0/checkpoint_merger.py +288 -0
- v0.27.0/clip_guided_images_mixing_stable_diffusion.py +445 -0
- v0.27.0/clip_guided_stable_diffusion.py +337 -0
- v0.27.0/clip_guided_stable_diffusion_img2img.py +483 -0
- v0.27.0/composable_stable_diffusion.py +527 -0
- v0.27.0/ddim_noise_comparative_analysis.py +190 -0
- v0.27.0/dps_pipeline.py +466 -0
- v0.27.0/edict_pipeline.py +264 -0
- v0.27.0/gluegen.py +811 -0
- v0.27.0/iadb.py +149 -0
- v0.27.0/imagic_stable_diffusion.py +469 -0
- v0.27.0/img2img_inpainting.py +437 -0
- v0.27.0/instaflow_one_step.py +680 -0
- v0.27.0/interpolate_stable_diffusion.py +498 -0
- v0.27.0/ip_adapter_face_id.py +1406 -0
- v0.27.0/latent_consistency_img2img.py +825 -0
- v0.27.0/latent_consistency_interpolate.py +990 -0
- v0.27.0/latent_consistency_txt2img.py +726 -0
- v0.27.0/llm_grounded_diffusion.py +1558 -0
- v0.27.0/lpw_stable_diffusion.py +1364 -0
- v0.27.0/lpw_stable_diffusion_onnx.py +1148 -0
- v0.27.0/lpw_stable_diffusion_xl.py +0 -0
- v0.27.0/magic_mix.py +152 -0
- v0.27.0/marigold_depth_estimation.py +605 -0
- v0.27.0/masked_stable_diffusion_img2img.py +262 -0
- v0.27.0/mixture_canvas.py +501 -0
- v0.27.0/mixture_tiling.py +405 -0
- v0.27.0/multilingual_stable_diffusion.py +410 -0
- v0.27.0/one_step_unet.py +24 -0
- v0.27.0/pipeline_animatediff_controlnet.py +1114 -0
- v0.27.0/pipeline_animatediff_img2video.py +980 -0
- v0.27.0/pipeline_demofusion_sdxl.py +1383 -0
- v0.27.0/pipeline_fabric.py +751 -0
- v0.27.0/pipeline_null_text_inversion.py +260 -0
- v0.27.0/pipeline_prompt2prompt.py +1422 -0
- v0.27.0/pipeline_sdxl_style_aligned.py +1906 -0
- v0.27.0/pipeline_stable_diffusion_upscale_ldm3d.py +772 -0
- v0.27.0/pipeline_stable_diffusion_xl_controlnet_adapter.py +1406 -0
- v0.27.0/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +1850 -0
- v0.27.0/pipeline_stable_diffusion_xl_instantid.py +1061 -0
- v0.27.0/pipeline_stable_diffusion_xl_ipex.py +1429 -0
- v0.27.0/pipeline_zero1to3.py +788 -0
- v0.27.0/regional_prompting_stable_diffusion.py +620 -0
- v0.27.0/rerender_a_video.py +1194 -0
- v0.27.0/run_onnx_controlnet.py +911 -0
- v0.27.0/run_tensorrt_controlnet.py +1022 -0
- v0.27.0/scheduling_ufogen.py +523 -0
- v0.27.0/sd_text2img_k_diffusion.py +414 -0
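The files above are the community pipelines snapshotted for the v0.27.0 release. As a rough loading sketch (not part of this upload, and assuming `custom_revision` resolves the pinned snapshot the way current diffusers releases document it; the base model id is a placeholder):

    # Loading sketch: pull one of the snapshotted community pipelines by file name.
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",        # placeholder base checkpoint
        custom_pipeline="lpw_stable_diffusion",  # one of the files listed above
        custom_revision="v0.27.0",               # pin to this snapshot
        torch_dtype=torch.float16,
    )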
v0.27.0/README.md
ADDED
The diff for this file is too large to render.
v0.27.0/bit_diffusion.py
ADDED
@@ -0,0 +1,264 @@
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


# convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


# modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """
    Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
    process from the learned model outputs (most often the predicted noise).
    Args:
        model_output (`torch.FloatTensor`): direct output from learned diffusion model.
        timestep (`int`): current discrete timestep in the diffusion chain.
        sample (`torch.FloatTensor`):
            current instance of sample being created by diffusion process.
        eta (`float`): weight of noise for added noise in diffusion step.
        use_clipped_model_output (`bool`): TODO
        generator: random number generator.
        return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
    Returns:
        [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
        [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
        returning a tuple, the first element is the sample tensor.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** (0.5)

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """
    Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
    process from the learned model outputs (most often the predicted noise).
    Args:
        model_output (`torch.FloatTensor`): direct output from learned diffusion model.
        timestep (`int`): current discrete timestep in the diffusion chain.
        sample (`torch.FloatTensor`):
            current instance of sample being created by diffusion process.
        prediction_type (`str`, default `epsilon`):
            indicates whether the model predicts the noise (epsilon), or the samples (`sample`).
        generator: random number generator.
        return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
    Returns:
        [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
        [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
        returning a tuple, the first element is the sample tensor.
    """
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
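For orientation, a quick round-trip check of the bit codec defined at the top of this file (a sketch, not part of the committed file):

    # Sanity check: an image in [0, 1] should survive decimal -> bits -> decimal,
    # up to the 8-bit quantization performed by decimal_to_bits.
    import torch

    img = torch.rand(1, 3, 8, 8)        # values in [0, 1]
    bits = decimal_to_bits(img)         # shape (1, 3 * 8, 8, 8), values in {-1, +1}
    restored = bits_to_decimal(bits)    # back to [0, 1], quantized to steps of 1/255
    assert torch.allclose(restored, (img * 255).int().float() / 255, atol=1e-6)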
v0.27.0/checkpoint_merger.py
ADDED
@@ -0,0 +1,288 @@
import glob
import os
from typing import Dict, List, Union

import safetensors.torch
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.utils import validate_hf_hub_args

from diffusers import DiffusionPipeline, __version__
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import CONFIG_NAME, ONNX_WEIGHTS_NAME, WEIGHTS_NAME


class CheckpointMergerPipeline(DiffusionPipeline):
    """
    A class that supports merging diffusion models based on the discussion here:
    https://github.com/huggingface/diffusers/issues/877

    Example usage:

    pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")

    merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)

    merged_pipe.to('cuda')

    prompt = "An astronaut riding a unicycle on Mars"

    results = merged_pipe(prompt)

    ## For more details, see the docstring for the merge method.

    """

    def __init__(self):
        self.register_to_config()
        super().__init__()

    def _compare_model_configs(self, dict0, dict1):
        if dict0 == dict1:
            return True
        else:
            config0, meta_keys0 = self._remove_meta_keys(dict0)
            config1, meta_keys1 = self._remove_meta_keys(dict1)
            if config0 == config1:
                print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
                return True
        return False

    def _remove_meta_keys(self, config_dict: Dict):
        meta_keys = []
        temp_dict = config_dict.copy()
        for key in config_dict.keys():
            if key.startswith("_"):
                temp_dict.pop(key)
                meta_keys.append(key)
        return (temp_dict, meta_keys)

    @torch.no_grad()
    @validate_hf_hub_args
    def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
        """
        Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
        in the argument 'pretrained_model_name_or_path_list' as a list.

        Parameters:
        -----------
            pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.

            **kwargs:
                Supports all the default DiffusionPipeline.get_config_dict kwargs viz..

                cache_dir, resume_download, force_download, proxies, local_files_only, token, revision, torch_dtype, device_map.

                alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
                    would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2

                interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
                    Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported.

                force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.

                variant - which variant of a pretrained model to load, e.g. "fp16" (None)

        """
        # Default kwargs from DiffusionPipeline
        cache_dir = kwargs.pop("cache_dir", None)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        token = kwargs.pop("token", None)
        variant = kwargs.pop("variant", None)
        revision = kwargs.pop("revision", None)
        torch_dtype = kwargs.pop("torch_dtype", None)
        device_map = kwargs.pop("device_map", None)

        alpha = kwargs.pop("alpha", 0.5)
        interp = kwargs.pop("interp", None)

        print("Received list", pretrained_model_name_or_path_list)
        print(f"Combining with alpha={alpha}, interpolation mode={interp}")

        checkpoint_count = len(pretrained_model_name_or_path_list)
        # Ignore result from model_index_json comparison of the two checkpoints
        force = kwargs.pop("force", False)

        # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
        if checkpoint_count > 3 or checkpoint_count < 2:
            raise ValueError(
                "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
                " passed."
            )

        print("Received the right number of checkpoints")
        # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
        # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None

        # Validate that the checkpoints can be merged
        # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
        config_dicts = []
        for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
            config_dict = DiffusionPipeline.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
            )
            config_dicts.append(config_dict)

        comparison_result = True
        for idx in range(1, len(config_dicts)):
            comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
            if not force and comparison_result is False:
                raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
        print(config_dicts[0], config_dicts[1])
        print("Compatible model_index.json files found")
        # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
        cached_folders = []
        for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts):
            folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
            allow_patterns = [os.path.join(k, "*") for k in folder_names]
            allow_patterns += [
                WEIGHTS_NAME,
                SCHEDULER_CONFIG_NAME,
                CONFIG_NAME,
                ONNX_WEIGHTS_NAME,
                DiffusionPipeline.config_name,
            ]
            requested_pipeline_class = config_dict.get("_class_name")
            user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}

            cached_folder = (
                pretrained_model_name_or_path
                if os.path.isdir(pretrained_model_name_or_path)
                else snapshot_download(
                    pretrained_model_name_or_path,
                    cache_dir=cache_dir,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    allow_patterns=allow_patterns,
                    user_agent=user_agent,
                )
            )
            print("Cached Folder", cached_folder)
            cached_folders.append(cached_folder)

        # Step 3:
        # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
        final_pipe = DiffusionPipeline.from_pretrained(
            cached_folders[0],
            torch_dtype=torch_dtype,
            device_map=device_map,
            variant=variant,
        )
        final_pipe.to(self.device)

        checkpoint_path_2 = None
        if len(cached_folders) > 2:
            checkpoint_path_2 = os.path.join(cached_folders[2])

        if interp == "sigmoid":
            theta_func = CheckpointMergerPipeline.sigmoid
        elif interp == "inv_sigmoid":
            theta_func = CheckpointMergerPipeline.inv_sigmoid
        elif interp == "add_diff":
            theta_func = CheckpointMergerPipeline.add_difference
        else:
            theta_func = CheckpointMergerPipeline.weighted_sum

        # Find each module's state dict.
        for attr in final_pipe.config.keys():
            if not attr.startswith("_"):
                checkpoint_path_1 = os.path.join(cached_folders[1], attr)
                if os.path.exists(checkpoint_path_1):
                    files = [
                        *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
                        *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
                    ]
                    checkpoint_path_1 = files[0] if len(files) > 0 else None
                if len(cached_folders) < 3:
                    checkpoint_path_2 = None
                else:
                    checkpoint_path_2 = os.path.join(cached_folders[2], attr)
                    if os.path.exists(checkpoint_path_2):
                        files = [
                            *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
                            *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
                        ]
                        checkpoint_path_2 = files[0] if len(files) > 0 else None
                # For an attr if both checkpoint_path_1 and 2 are None, ignore.
                # If at least one is present, deal with it according to interp method, of course only if the state_dict keys match.
                if checkpoint_path_1 is None and checkpoint_path_2 is None:
                    print(f"Skipping {attr}: not present in 2nd or 3rd model")
                    continue
                try:
                    module = getattr(final_pipe, attr)
                    if isinstance(module, bool):  # ignore requires_safety_checker boolean
                        continue
                    theta_0 = getattr(module, "state_dict")
                    theta_0 = theta_0()

                    update_theta_0 = getattr(module, "load_state_dict")
                    theta_1 = (
                        safetensors.torch.load_file(checkpoint_path_1)
                        if (checkpoint_path_1.endswith(".safetensors"))
                        else torch.load(checkpoint_path_1, map_location="cpu")
                    )
                    theta_2 = None
                    if checkpoint_path_2:
                        theta_2 = (
                            safetensors.torch.load_file(checkpoint_path_2)
                            if (checkpoint_path_2.endswith(".safetensors"))
                            else torch.load(checkpoint_path_2, map_location="cpu")
                        )

                    if not theta_0.keys() == theta_1.keys():
                        print(f"Skipping {attr}: key mismatch")
                        continue
                    if theta_2 and not theta_1.keys() == theta_2.keys():
                        print(f"Skipping {attr}: key mismatch")
                except Exception as e:
                    print(f"Skipping {attr} due to an unexpected error: {str(e)}")
                    continue
                print(f"MERGING {attr}")

                for key in theta_0.keys():
                    if theta_2:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
                    else:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)

                del theta_1
                del theta_2
                update_theta_0(theta_0)

                del theta_0
        return final_pipe

    @staticmethod
    def weighted_sum(theta0, theta1, theta2, alpha):
        return ((1 - alpha) * theta0) + (alpha * theta1)

    # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def sigmoid(theta0, theta1, theta2, alpha):
        alpha = alpha * alpha * (3 - (2 * alpha))
        return theta0 + ((theta1 - theta0) * alpha)

    # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def inv_sigmoid(theta0, theta1, theta2, alpha):
        import math

        alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
        return theta0 + ((theta1 - theta0) * alpha)

    @staticmethod
    def add_difference(theta0, theta1, theta2, alpha):
        return theta0 + (theta1 - theta2) * (1.0 - alpha)
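The static interpolation helpers at the bottom of this file operate element-wise on state_dict tensors. A tiny numeric illustration on plain tensors (a sketch, not part of the committed file):

    import torch

    a, b, c = torch.zeros(3), torch.ones(3), torch.full((3,), 0.5)

    # weighted_sum: plain linear interpolation between the first two checkpoints.
    print(CheckpointMergerPipeline.weighted_sum(a, b, None, 0.3))   # (1 - 0.3) * 0 + 0.3 * 1 = 0.3 everywhere

    # add_difference: add the (theta1 - theta2) delta, scaled by (1 - alpha), onto theta0.
    print(CheckpointMergerPipeline.add_difference(a, b, c, 0.3))    # 0 + (1 - 0.5) * (1 - 0.3) = 0.35 everywhere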
v0.27.0/clip_guided_images_mixing_stable_diffusion.py
ADDED
@@ -0,0 +1,445 @@
# -*- coding: utf-8 -*-
import inspect
from typing import Optional, Union

import numpy as np
import PIL.Image
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.torch_utils import randn_tensor


def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                progress_bar.update()
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
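A quick sanity check of the slerp helper defined near the top of this file, since it drives the latent, prompt, and CLIP-embedding mixing (a sketch, not part of the committed file):

    # Interpolating halfway between two orthogonal unit vectors should bisect the 90° angle.
    import torch

    v0 = torch.tensor([1.0, 0.0])
    v1 = torch.tensor([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    print(mid)  # roughly [0.7071, 0.7071], i.e. still unit length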
v0.27.0/clip_guided_stable_diffusion.py
ADDED
@@ -0,0 +1,337 @@
| 1 |
+
import inspect
|
| 2 |
+
from typing import List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import nn
|
| 6 |
+
from torch.nn import functional as F
|
| 7 |
+
from torchvision import transforms
|
| 8 |
+
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
|
| 9 |
+
|
| 10 |
+
from diffusers import (
|
| 11 |
+
AutoencoderKL,
|
| 12 |
+
DDIMScheduler,
|
| 13 |
+
DPMSolverMultistepScheduler,
|
| 14 |
+
LMSDiscreteScheduler,
|
| 15 |
+
PNDMScheduler,
|
| 16 |
+
UNet2DConditionModel,
|
| 17 |
+
)
|
| 18 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 19 |
+
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class MakeCutouts(nn.Module):
|
| 23 |
+
def __init__(self, cut_size, cut_power=1.0):
|
| 24 |
+
super().__init__()
|
| 25 |
+
|
| 26 |
+
self.cut_size = cut_size
|
| 27 |
+
self.cut_power = cut_power
|
| 28 |
+
|
| 29 |
+
def forward(self, pixel_values, num_cutouts):
|
| 30 |
+
sideY, sideX = pixel_values.shape[2:4]
|
| 31 |
+
max_size = min(sideX, sideY)
|
| 32 |
+
min_size = min(sideX, sideY, self.cut_size)
|
| 33 |
+
cutouts = []
|
| 34 |
+
for _ in range(num_cutouts):
|
| 35 |
+
size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
|
| 36 |
+
offsetx = torch.randint(0, sideX - size + 1, ())
|
| 37 |
+
offsety = torch.randint(0, sideY - size + 1, ())
|
| 38 |
+
cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
|
| 39 |
+
cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
|
| 40 |
+
return torch.cat(cutouts)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def spherical_dist_loss(x, y):
|
| 44 |
+
x = F.normalize(x, dim=-1)
|
| 45 |
+
y = F.normalize(y, dim=-1)
|
| 46 |
+
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def set_requires_grad(model, value):
|
| 50 |
+
for param in model.parameters():
|
| 51 |
+
param.requires_grad = value
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
|
| 55 |
+
"""CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
|
| 56 |
+
- https://github.com/Jack000/glid-3-xl
|
| 57 |
+
- https://github.dev/crowsonkb/k-diffusion
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
def __init__(
|
| 61 |
+
self,
|
| 62 |
+
vae: AutoencoderKL,
|
| 63 |
+
text_encoder: CLIPTextModel,
|
| 64 |
+
clip_model: CLIPModel,
|
| 65 |
+
tokenizer: CLIPTokenizer,
|
| 66 |
+
unet: UNet2DConditionModel,
|
| 67 |
+
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
|
| 68 |
+
feature_extractor: CLIPImageProcessor,
|
| 69 |
+
):
|
| 70 |
+
super().__init__()
|
| 71 |
+
self.register_modules(
|
| 72 |
+
vae=vae,
|
| 73 |
+
text_encoder=text_encoder,
|
| 74 |
+
clip_model=clip_model,
|
| 75 |
+
tokenizer=tokenizer,
|
| 76 |
+
unet=unet,
|
| 77 |
+
scheduler=scheduler,
|
| 78 |
+
feature_extractor=feature_extractor,
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
|
| 82 |
+
self.cut_out_size = (
|
| 83 |
+
feature_extractor.size
|
| 84 |
+
if isinstance(feature_extractor.size, int)
|
| 85 |
+
else feature_extractor.size["shortest_edge"]
|
| 86 |
+
)
|
| 87 |
+
self.make_cutouts = MakeCutouts(self.cut_out_size)
|
| 88 |
+
|
| 89 |
+
set_requires_grad(self.text_encoder, False)
|
| 90 |
+
set_requires_grad(self.clip_model, False)
|
| 91 |
+
|
| 92 |
+
def freeze_vae(self):
|
| 93 |
+
set_requires_grad(self.vae, False)
|
| 94 |
+
|
| 95 |
+
def unfreeze_vae(self):
|
| 96 |
+
set_requires_grad(self.vae, True)
|
| 97 |
+
|
| 98 |
+
def freeze_unet(self):
|
| 99 |
+
set_requires_grad(self.unet, False)
|
| 100 |
+
|
| 101 |
+
def unfreeze_unet(self):
|
| 102 |
+
set_requires_grad(self.unet, True)
|
| 103 |
+
|
| 104 |
+
@torch.enable_grad()
|
| 105 |
+
def cond_fn(
|
| 106 |
+
self,
|
| 107 |
+
latents,
|
| 108 |
+
timestep,
|
| 109 |
+
index,
|
| 110 |
+
text_embeddings,
|
| 111 |
+
noise_pred_original,
|
| 112 |
+
text_embeddings_clip,
|
| 113 |
+
clip_guidance_scale,
|
| 114 |
+
num_cutouts,
|
| 115 |
+
use_cutouts=True,
|
| 116 |
+
):
|
| 117 |
+
latents = latents.detach().requires_grad_()
|
| 118 |
+
|
| 119 |
+
latent_model_input = self.scheduler.scale_model_input(latents, timestep)
|
| 120 |
+
|
| 121 |
+
# predict the noise residual
|
| 122 |
+
noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
|
| 123 |
+
|
| 124 |
+
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
|
| 125 |
+
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
|
| 126 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 127 |
+
# compute predicted original sample from predicted noise also called
|
| 128 |
+
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
|
| 129 |
+
pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
|
| 130 |
+
|
| 131 |
+
fac = torch.sqrt(beta_prod_t)
|
| 132 |
+
sample = pred_original_sample * (fac) + latents * (1 - fac)
|
| 133 |
+
elif isinstance(self.scheduler, LMSDiscreteScheduler):
|
| 134 |
+
sigma = self.scheduler.sigmas[index]
|
| 135 |
+
sample = latents - sigma * noise_pred
|
| 136 |
+
else:
|
| 137 |
+
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
|
| 138 |
+
|
| 139 |
+
sample = 1 / self.vae.config.scaling_factor * sample
|
| 140 |
+
image = self.vae.decode(sample).sample
|
| 141 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 142 |
+
|
| 143 |
+
if use_cutouts:
|
| 144 |
+
image = self.make_cutouts(image, num_cutouts)
|
| 145 |
+
else:
|
| 146 |
+
image = transforms.Resize(self.cut_out_size)(image)
|
| 147 |
+
image = self.normalize(image).to(latents.dtype)
|
| 148 |
+
|
| 149 |
+
image_embeddings_clip = self.clip_model.get_image_features(image)
|
| 150 |
+
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
|
| 151 |
+
|
| 152 |
+
if use_cutouts:
|
| 153 |
+
dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
|
| 154 |
+
dists = dists.view([num_cutouts, sample.shape[0], -1])
|
| 155 |
+
loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
|
| 156 |
+
else:
|
| 157 |
+
loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
|
| 158 |
+
|
| 159 |
+
grads = -torch.autograd.grad(loss, latents)[0]
|
| 160 |
+
|
| 161 |
+
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
| 162 |
+
latents = latents.detach() + grads * (sigma**2)
|
| 163 |
+
noise_pred = noise_pred_original
|
| 164 |
+
else:
|
| 165 |
+
noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
|
| 166 |
+
return noise_pred, latents
|
| 167 |
+
|
| 168 |
+
@torch.no_grad()
|
| 169 |
+
def __call__(
|
| 170 |
+
self,
|
| 171 |
+
prompt: Union[str, List[str]],
|
| 172 |
+
height: Optional[int] = 512,
|
| 173 |
+
width: Optional[int] = 512,
|
| 174 |
+
num_inference_steps: Optional[int] = 50,
|
| 175 |
+
guidance_scale: Optional[float] = 7.5,
|
| 176 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 177 |
+
eta: float = 0.0,
|
| 178 |
+
clip_guidance_scale: Optional[float] = 100,
|
| 179 |
+
clip_prompt: Optional[Union[str, List[str]]] = None,
|
| 180 |
+
num_cutouts: Optional[int] = 4,
|
| 181 |
+
use_cutouts: Optional[bool] = True,
|
| 182 |
+
generator: Optional[torch.Generator] = None,
|
| 183 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 184 |
+
output_type: Optional[str] = "pil",
|
| 185 |
+
return_dict: bool = True,
|
| 186 |
+
):
|
| 187 |
+
if isinstance(prompt, str):
|
| 188 |
+
batch_size = 1
|
| 189 |
+
elif isinstance(prompt, list):
|
| 190 |
+
batch_size = len(prompt)
|
| 191 |
+
else:
|
| 192 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 193 |
+
|
| 194 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 195 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 196 |
+
|
| 197 |
+
# get prompt text embeddings
|
| 198 |
+
text_input = self.tokenizer(
|
| 199 |
+
prompt,
|
| 200 |
+
padding="max_length",
|
| 201 |
+
max_length=self.tokenizer.model_max_length,
|
| 202 |
+
truncation=True,
|
| 203 |
+
return_tensors="pt",
|
| 204 |
+
)
|
| 205 |
+
text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
|
| 206 |
+
# duplicate text embeddings for each generation per prompt
|
| 207 |
+
text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
|
| 208 |
+
|
| 209 |
+
if clip_guidance_scale > 0:
|
| 210 |
+
if clip_prompt is not None:
|
| 211 |
+
clip_text_input = self.tokenizer(
|
| 212 |
+
clip_prompt,
|
| 213 |
+
padding="max_length",
|
| 214 |
+
max_length=self.tokenizer.model_max_length,
|
| 215 |
+
truncation=True,
|
| 216 |
+
return_tensors="pt",
|
| 217 |
+
).input_ids.to(self.device)
|
| 218 |
+
else:
|
| 219 |
+
clip_text_input = text_input.input_ids.to(self.device)
|
| 220 |
+
text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
|
| 221 |
+
text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
|
| 222 |
+
# duplicate text embeddings clip for each generation per prompt
|
| 223 |
+
text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
|
| 224 |
+
|
| 225 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 226 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 227 |
+
# corresponds to doing no classifier free guidance.
|
| 228 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 229 |
+
# get unconditional embeddings for classifier free guidance
|
| 230 |
+
if do_classifier_free_guidance:
|
| 231 |
+
max_length = text_input.input_ids.shape[-1]
|
| 232 |
+
uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
|
| 233 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 234 |
+
# duplicate unconditional embeddings for each generation per prompt
|
| 235 |
+
uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
|
| 236 |
+
|
| 237 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 238 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 239 |
+
# to avoid doing two forward passes
|
| 240 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 241 |
+
|
| 242 |
+
# get the initial random noise unless the user supplied it
|
| 243 |
+
|
| 244 |
+
# Unlike in other pipelines, latents need to be generated in the target device
|
| 245 |
+
# for 1-to-1 results reproducibility with the CompVis implementation.
|
| 246 |
+
# However this currently doesn't work in `mps`.
|
| 247 |
+
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
|
| 248 |
+
latents_dtype = text_embeddings.dtype
|
| 249 |
+
if latents is None:
|
| 250 |
+
if self.device.type == "mps":
|
| 251 |
+
# randn does not work reproducibly on mps
|
| 252 |
+
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
| 253 |
+
self.device
|
| 254 |
+
)
|
| 255 |
+
else:
|
| 256 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
| 257 |
+
else:
|
| 258 |
+
if latents.shape != latents_shape:
|
| 259 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
| 260 |
+
latents = latents.to(self.device)
|
| 261 |
+
|
| 262 |
+
# set timesteps
|
| 263 |
+
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
| 264 |
+
extra_set_kwargs = {}
|
| 265 |
+
if accepts_offset:
|
| 266 |
+
extra_set_kwargs["offset"] = 1
|
| 267 |
+
|
| 268 |
+
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
|
| 269 |
+
|
| 270 |
+
# Some schedulers like PNDM have timesteps as arrays
|
| 271 |
+
# It's more optimized to move all timesteps to correct device beforehand
|
| 272 |
+
timesteps_tensor = self.scheduler.timesteps.to(self.device)
|
| 273 |
+
|
| 274 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 275 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 276 |
+
|
| 277 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 278 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 279 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 280 |
+
# and should be between [0, 1]
|
| 281 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 282 |
+
extra_step_kwargs = {}
|
| 283 |
+
if accepts_eta:
|
| 284 |
+
extra_step_kwargs["eta"] = eta
|
| 285 |
+
|
| 286 |
+
# check if the scheduler accepts generator
|
| 287 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 288 |
+
if accepts_generator:
|
| 289 |
+
extra_step_kwargs["generator"] = generator
|
| 290 |
+
|
| 291 |
+
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
|
| 292 |
+
# expand the latents if we are doing classifier free guidance
|
| 293 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 294 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 295 |
+
|
| 296 |
+
# predict the noise residual
|
| 297 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
| 298 |
+
|
| 299 |
+
# perform classifier free guidance
|
| 300 |
+
if do_classifier_free_guidance:
|
| 301 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 302 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 303 |
+
|
| 304 |
+
# perform clip guidance
|
| 305 |
+
if clip_guidance_scale > 0:
|
| 306 |
+
text_embeddings_for_guidance = (
|
| 307 |
+
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
|
| 308 |
+
)
|
| 309 |
+
noise_pred, latents = self.cond_fn(
|
| 310 |
+
latents,
|
| 311 |
+
t,
|
| 312 |
+
i,
|
| 313 |
+
text_embeddings_for_guidance,
|
| 314 |
+
noise_pred,
|
| 315 |
+
text_embeddings_clip,
|
| 316 |
+
clip_guidance_scale,
|
| 317 |
+
num_cutouts,
|
| 318 |
+
use_cutouts,
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 322 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 323 |
+
|
| 324 |
+
# scale and decode the image latents with vae
|
| 325 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 326 |
+
image = self.vae.decode(latents).sample
|
| 327 |
+
|
| 328 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 329 |
+
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
| 330 |
+
|
| 331 |
+
if output_type == "pil":
|
| 332 |
+
image = self.numpy_to_pil(image)
|
| 333 |
+
|
| 334 |
+
if not return_dict:
|
| 335 |
+
return (image, None)
|
| 336 |
+
|
| 337 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
|
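A minimal usage sketch for the pipeline defined above, adapted from the example docstring shipped with the img2img variant below. It assumes the file is loadable as the `clip_guided_stable_diffusion` community pipeline and that a CUDA device is available; the prompt and output filename are placeholders.

```python
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel

clip_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
feature_extractor = CLIPImageProcessor.from_pretrained(clip_id)
clip_model = CLIPModel.from_pretrained(clip_id, torch_dtype=torch.float16)

# Load the community pipeline on top of a Stable Diffusion checkpoint.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion",  # assumed community pipeline name
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="a watercolor painting of a lighthouse at dawn",
    num_inference_steps=50,
    guidance_scale=7.5,
    clip_guidance_scale=100,
    num_cutouts=4,
    use_cutouts=False,
).images[0]
image.save("clip_guided.png")
```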
v0.27.0/clip_guided_stable_diffusion_img2img.py
ADDED
|
@@ -0,0 +1,483 @@
| 1 |
+
import inspect
|
| 2 |
+
from typing import List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import PIL.Image
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
from torch.nn import functional as F
|
| 9 |
+
from torchvision import transforms
|
| 10 |
+
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
|
| 11 |
+
|
| 12 |
+
from diffusers import (
|
| 13 |
+
AutoencoderKL,
|
| 14 |
+
DDIMScheduler,
|
| 15 |
+
DPMSolverMultistepScheduler,
|
| 16 |
+
LMSDiscreteScheduler,
|
| 17 |
+
PNDMScheduler,
|
| 18 |
+
UNet2DConditionModel,
|
| 19 |
+
)
|
| 20 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 21 |
+
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
|
| 22 |
+
from diffusers.utils import PIL_INTERPOLATION, deprecate
|
| 23 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
EXAMPLE_DOC_STRING = """
|
| 27 |
+
Examples:
|
| 28 |
+
```
|
| 29 |
+
from io import BytesIO
|
| 30 |
+
|
| 31 |
+
import requests
|
| 32 |
+
import torch
|
| 33 |
+
from diffusers import DiffusionPipeline
|
| 34 |
+
from PIL import Image
|
| 35 |
+
from transformers import CLIPFeatureExtractor, CLIPModel
|
| 36 |
+
|
| 37 |
+
feature_extractor = CLIPFeatureExtractor.from_pretrained(
|
| 38 |
+
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
|
| 39 |
+
)
|
| 40 |
+
clip_model = CLIPModel.from_pretrained(
|
| 41 |
+
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
guided_pipeline = DiffusionPipeline.from_pretrained(
|
| 46 |
+
"CompVis/stable-diffusion-v1-4",
|
| 47 |
+
# custom_pipeline="clip_guided_stable_diffusion",
|
| 48 |
+
custom_pipeline="/home/njindal/diffusers/examples/community/clip_guided_stable_diffusion.py",
|
| 49 |
+
clip_model=clip_model,
|
| 50 |
+
feature_extractor=feature_extractor,
|
| 51 |
+
torch_dtype=torch.float16,
|
| 52 |
+
)
|
| 53 |
+
guided_pipeline.enable_attention_slicing()
|
| 54 |
+
guided_pipeline = guided_pipeline.to("cuda")
|
| 55 |
+
|
| 56 |
+
prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
|
| 57 |
+
|
| 58 |
+
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
| 59 |
+
|
| 60 |
+
response = requests.get(url)
|
| 61 |
+
init_image = Image.open(BytesIO(response.content)).convert("RGB")
|
| 62 |
+
|
| 63 |
+
image = guided_pipeline(
|
| 64 |
+
prompt=prompt,
|
| 65 |
+
num_inference_steps=30,
|
| 66 |
+
image=init_image,
|
| 67 |
+
strength=0.75,
|
| 68 |
+
guidance_scale=7.5,
|
| 69 |
+
clip_guidance_scale=100,
|
| 70 |
+
num_cutouts=4,
|
| 71 |
+
use_cutouts=False,
|
| 72 |
+
).images[0]
|
| 73 |
+
display(image)
|
| 74 |
+
```
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def preprocess(image, w, h):
|
| 79 |
+
if isinstance(image, torch.Tensor):
|
| 80 |
+
return image
|
| 81 |
+
elif isinstance(image, PIL.Image.Image):
|
| 82 |
+
image = [image]
|
| 83 |
+
|
| 84 |
+
if isinstance(image[0], PIL.Image.Image):
|
| 85 |
+
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
|
| 86 |
+
image = np.concatenate(image, axis=0)
|
| 87 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 88 |
+
image = image.transpose(0, 3, 1, 2)
|
| 89 |
+
image = 2.0 * image - 1.0
|
| 90 |
+
image = torch.from_numpy(image)
|
| 91 |
+
elif isinstance(image[0], torch.Tensor):
|
| 92 |
+
image = torch.cat(image, dim=0)
|
| 93 |
+
return image
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class MakeCutouts(nn.Module):
|
| 97 |
+
def __init__(self, cut_size, cut_power=1.0):
|
| 98 |
+
super().__init__()
|
| 99 |
+
|
| 100 |
+
self.cut_size = cut_size
|
| 101 |
+
self.cut_power = cut_power
|
| 102 |
+
|
| 103 |
+
def forward(self, pixel_values, num_cutouts):
|
| 104 |
+
sideY, sideX = pixel_values.shape[2:4]
|
| 105 |
+
max_size = min(sideX, sideY)
|
| 106 |
+
min_size = min(sideX, sideY, self.cut_size)
|
| 107 |
+
cutouts = []
|
| 108 |
+
for _ in range(num_cutouts):
|
| 109 |
+
size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
|
| 110 |
+
offsetx = torch.randint(0, sideX - size + 1, ())
|
| 111 |
+
offsety = torch.randint(0, sideY - size + 1, ())
|
| 112 |
+
cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
|
| 113 |
+
cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
|
| 114 |
+
return torch.cat(cutouts)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def spherical_dist_loss(x, y):
|
| 118 |
+
x = F.normalize(x, dim=-1)
|
| 119 |
+
y = F.normalize(y, dim=-1)
|
| 120 |
+
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def set_requires_grad(model, value):
|
| 124 |
+
for param in model.parameters():
|
| 125 |
+
param.requires_grad = value
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
|
| 129 |
+
"""CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
|
| 130 |
+
- https://github.com/Jack000/glid-3-xl
|
| 131 |
+
- https://github.dev/crowsonkb/k-diffusion
|
| 132 |
+
"""
|
| 133 |
+
|
| 134 |
+
def __init__(
|
| 135 |
+
self,
|
| 136 |
+
vae: AutoencoderKL,
|
| 137 |
+
text_encoder: CLIPTextModel,
|
| 138 |
+
clip_model: CLIPModel,
|
| 139 |
+
tokenizer: CLIPTokenizer,
|
| 140 |
+
unet: UNet2DConditionModel,
|
| 141 |
+
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
|
| 142 |
+
feature_extractor: CLIPFeatureExtractor,
|
| 143 |
+
):
|
| 144 |
+
super().__init__()
|
| 145 |
+
self.register_modules(
|
| 146 |
+
vae=vae,
|
| 147 |
+
text_encoder=text_encoder,
|
| 148 |
+
clip_model=clip_model,
|
| 149 |
+
tokenizer=tokenizer,
|
| 150 |
+
unet=unet,
|
| 151 |
+
scheduler=scheduler,
|
| 152 |
+
feature_extractor=feature_extractor,
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
|
| 156 |
+
self.cut_out_size = (
|
| 157 |
+
feature_extractor.size
|
| 158 |
+
if isinstance(feature_extractor.size, int)
|
| 159 |
+
else feature_extractor.size["shortest_edge"]
|
| 160 |
+
)
|
| 161 |
+
self.make_cutouts = MakeCutouts(self.cut_out_size)
|
| 162 |
+
|
| 163 |
+
set_requires_grad(self.text_encoder, False)
|
| 164 |
+
set_requires_grad(self.clip_model, False)
|
| 165 |
+
|
| 166 |
+
def freeze_vae(self):
|
| 167 |
+
set_requires_grad(self.vae, False)
|
| 168 |
+
|
| 169 |
+
def unfreeze_vae(self):
|
| 170 |
+
set_requires_grad(self.vae, True)
|
| 171 |
+
|
| 172 |
+
def freeze_unet(self):
|
| 173 |
+
set_requires_grad(self.unet, False)
|
| 174 |
+
|
| 175 |
+
def unfreeze_unet(self):
|
| 176 |
+
set_requires_grad(self.unet, True)
|
| 177 |
+
|
| 178 |
+
def get_timesteps(self, num_inference_steps, strength, device):
|
| 179 |
+
# get the original timestep using init_timestep
|
| 180 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 181 |
+
|
| 182 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 183 |
+
timesteps = self.scheduler.timesteps[t_start:]
|
| 184 |
+
|
| 185 |
+
return timesteps, num_inference_steps - t_start
|
| 186 |
+
|
| 187 |
+
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
|
| 188 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 189 |
+
raise ValueError(
|
| 190 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
image = image.to(device=device, dtype=dtype)
|
| 194 |
+
|
| 195 |
+
batch_size = batch_size * num_images_per_prompt
|
| 196 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 197 |
+
raise ValueError(
|
| 198 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 199 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 200 |
+
)
|
| 201 |
+
|
| 202 |
+
if isinstance(generator, list):
|
| 203 |
+
init_latents = [
|
| 204 |
+
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
|
| 205 |
+
]
|
| 206 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 207 |
+
else:
|
| 208 |
+
init_latents = self.vae.encode(image).latent_dist.sample(generator)
|
| 209 |
+
|
| 210 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 211 |
+
|
| 212 |
+
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
| 213 |
+
# expand init_latents for batch_size
|
| 214 |
+
deprecation_message = (
|
| 215 |
+
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
|
| 216 |
+
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
|
| 217 |
+
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
|
| 218 |
+
" your script to pass as many initial images as text prompts to suppress this warning."
|
| 219 |
+
)
|
| 220 |
+
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
|
| 221 |
+
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
| 222 |
+
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
|
| 223 |
+
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
| 224 |
+
raise ValueError(
|
| 225 |
+
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
| 226 |
+
)
|
| 227 |
+
else:
|
| 228 |
+
init_latents = torch.cat([init_latents], dim=0)
|
| 229 |
+
|
| 230 |
+
shape = init_latents.shape
|
| 231 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 232 |
+
|
| 233 |
+
# get latents
|
| 234 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 235 |
+
latents = init_latents
|
| 236 |
+
|
| 237 |
+
return latents
|
| 238 |
+
|
| 239 |
+
@torch.enable_grad()
|
| 240 |
+
def cond_fn(
|
| 241 |
+
self,
|
| 242 |
+
latents,
|
| 243 |
+
timestep,
|
| 244 |
+
index,
|
| 245 |
+
text_embeddings,
|
| 246 |
+
noise_pred_original,
|
| 247 |
+
text_embeddings_clip,
|
| 248 |
+
clip_guidance_scale,
|
| 249 |
+
num_cutouts,
|
| 250 |
+
use_cutouts=True,
|
| 251 |
+
):
|
| 252 |
+
latents = latents.detach().requires_grad_()
|
| 253 |
+
|
| 254 |
+
latent_model_input = self.scheduler.scale_model_input(latents, timestep)
|
| 255 |
+
|
| 256 |
+
# predict the noise residual
|
| 257 |
+
noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
|
| 258 |
+
|
| 259 |
+
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
|
| 260 |
+
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
|
| 261 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 262 |
+
# compute predicted original sample from predicted noise also called
|
| 263 |
+
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
|
| 264 |
+
pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
|
| 265 |
+
|
| 266 |
+
fac = torch.sqrt(beta_prod_t)
|
| 267 |
+
sample = pred_original_sample * (fac) + latents * (1 - fac)
|
| 268 |
+
elif isinstance(self.scheduler, LMSDiscreteScheduler):
|
| 269 |
+
sigma = self.scheduler.sigmas[index]
|
| 270 |
+
sample = latents - sigma * noise_pred
|
| 271 |
+
else:
|
| 272 |
+
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
|
| 273 |
+
|
| 274 |
+
sample = 1 / self.vae.config.scaling_factor * sample
|
| 275 |
+
image = self.vae.decode(sample).sample
|
| 276 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 277 |
+
|
| 278 |
+
if use_cutouts:
|
| 279 |
+
image = self.make_cutouts(image, num_cutouts)
|
| 280 |
+
else:
|
| 281 |
+
image = transforms.Resize(self.cut_out_size)(image)
|
| 282 |
+
image = self.normalize(image).to(latents.dtype)
|
| 283 |
+
|
| 284 |
+
image_embeddings_clip = self.clip_model.get_image_features(image)
|
| 285 |
+
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
|
| 286 |
+
|
| 287 |
+
if use_cutouts:
|
| 288 |
+
dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
|
| 289 |
+
dists = dists.view([num_cutouts, sample.shape[0], -1])
|
| 290 |
+
loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
|
| 291 |
+
else:
|
| 292 |
+
loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
|
| 293 |
+
|
| 294 |
+
grads = -torch.autograd.grad(loss, latents)[0]
|
| 295 |
+
|
| 296 |
+
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
| 297 |
+
latents = latents.detach() + grads * (sigma**2)
|
| 298 |
+
noise_pred = noise_pred_original
|
| 299 |
+
else:
|
| 300 |
+
noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
|
| 301 |
+
return noise_pred, latents
|
| 302 |
+
|
| 303 |
+
@torch.no_grad()
|
| 304 |
+
def __call__(
|
| 305 |
+
self,
|
| 306 |
+
prompt: Union[str, List[str]],
|
| 307 |
+
height: Optional[int] = 512,
|
| 308 |
+
width: Optional[int] = 512,
|
| 309 |
+
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 310 |
+
strength: float = 0.8,
|
| 311 |
+
num_inference_steps: Optional[int] = 50,
|
| 312 |
+
guidance_scale: Optional[float] = 7.5,
|
| 313 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 314 |
+
eta: float = 0.0,
|
| 315 |
+
clip_guidance_scale: Optional[float] = 100,
|
| 316 |
+
clip_prompt: Optional[Union[str, List[str]]] = None,
|
| 317 |
+
num_cutouts: Optional[int] = 4,
|
| 318 |
+
use_cutouts: Optional[bool] = True,
|
| 319 |
+
generator: Optional[torch.Generator] = None,
|
| 320 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 321 |
+
output_type: Optional[str] = "pil",
|
| 322 |
+
return_dict: bool = True,
|
| 323 |
+
):
|
| 324 |
+
if isinstance(prompt, str):
|
| 325 |
+
batch_size = 1
|
| 326 |
+
elif isinstance(prompt, list):
|
| 327 |
+
batch_size = len(prompt)
|
| 328 |
+
else:
|
| 329 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 330 |
+
|
| 331 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 332 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 333 |
+
|
| 334 |
+
# get prompt text embeddings
|
| 335 |
+
text_input = self.tokenizer(
|
| 336 |
+
prompt,
|
| 337 |
+
padding="max_length",
|
| 338 |
+
max_length=self.tokenizer.model_max_length,
|
| 339 |
+
truncation=True,
|
| 340 |
+
return_tensors="pt",
|
| 341 |
+
)
|
| 342 |
+
text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
|
| 343 |
+
# duplicate text embeddings for each generation per prompt
|
| 344 |
+
text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
|
| 345 |
+
|
| 346 |
+
# set timesteps
|
| 347 |
+
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
| 348 |
+
extra_set_kwargs = {}
|
| 349 |
+
if accepts_offset:
|
| 350 |
+
extra_set_kwargs["offset"] = 1
|
| 351 |
+
|
| 352 |
+
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
|
| 353 |
+
# Some schedulers like PNDM have timesteps as arrays
|
| 354 |
+
# It's more optimized to move all timesteps to correct device beforehand
|
| 355 |
+
self.scheduler.timesteps.to(self.device)
|
| 356 |
+
|
| 357 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
|
| 358 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 359 |
+
|
| 360 |
+
# Preprocess image
|
| 361 |
+
image = preprocess(image, width, height)
|
| 362 |
+
latents = self.prepare_latents(
|
| 363 |
+
image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, self.device, generator
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
if clip_guidance_scale > 0:
|
| 367 |
+
if clip_prompt is not None:
|
| 368 |
+
clip_text_input = self.tokenizer(
|
| 369 |
+
clip_prompt,
|
| 370 |
+
padding="max_length",
|
| 371 |
+
max_length=self.tokenizer.model_max_length,
|
| 372 |
+
truncation=True,
|
| 373 |
+
return_tensors="pt",
|
| 374 |
+
).input_ids.to(self.device)
|
| 375 |
+
else:
|
| 376 |
+
clip_text_input = text_input.input_ids.to(self.device)
|
| 377 |
+
text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
|
| 378 |
+
text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
|
| 379 |
+
# duplicate text embeddings clip for each generation per prompt
|
| 380 |
+
text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
|
| 381 |
+
|
| 382 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 383 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 384 |
+
# corresponds to doing no classifier free guidance.
|
| 385 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 386 |
+
# get unconditional embeddings for classifier free guidance
|
| 387 |
+
if do_classifier_free_guidance:
|
| 388 |
+
max_length = text_input.input_ids.shape[-1]
|
| 389 |
+
uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
|
| 390 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 391 |
+
# duplicate unconditional embeddings for each generation per prompt
|
| 392 |
+
uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
|
| 393 |
+
|
| 394 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 395 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 396 |
+
# to avoid doing two forward passes
|
| 397 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 398 |
+
|
| 399 |
+
# get the initial random noise unless the user supplied it
|
| 400 |
+
|
| 401 |
+
# Unlike in other pipelines, latents need to be generated in the target device
|
| 402 |
+
# for 1-to-1 results reproducibility with the CompVis implementation.
|
| 403 |
+
# However this currently doesn't work in `mps`.
|
| 404 |
+
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
|
| 405 |
+
latents_dtype = text_embeddings.dtype
|
| 406 |
+
if latents is None:
|
| 407 |
+
if self.device.type == "mps":
|
| 408 |
+
# randn does not work reproducibly on mps
|
| 409 |
+
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
| 410 |
+
self.device
|
| 411 |
+
)
|
| 412 |
+
else:
|
| 413 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
| 414 |
+
else:
|
| 415 |
+
if latents.shape != latents_shape:
|
| 416 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
| 417 |
+
latents = latents.to(self.device)
|
| 418 |
+
|
| 419 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 420 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 421 |
+
|
| 422 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 423 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 424 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 425 |
+
# and should be between [0, 1]
|
| 426 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 427 |
+
extra_step_kwargs = {}
|
| 428 |
+
if accepts_eta:
|
| 429 |
+
extra_step_kwargs["eta"] = eta
|
| 430 |
+
|
| 431 |
+
# check if the scheduler accepts generator
|
| 432 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 433 |
+
if accepts_generator:
|
| 434 |
+
extra_step_kwargs["generator"] = generator
|
| 435 |
+
|
| 436 |
+
with self.progress_bar(total=num_inference_steps):
|
| 437 |
+
for i, t in enumerate(timesteps):
|
| 438 |
+
# expand the latents if we are doing classifier free guidance
|
| 439 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 440 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 441 |
+
|
| 442 |
+
# predict the noise residual
|
| 443 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
| 444 |
+
|
| 445 |
+
# perform classifier free guidance
|
| 446 |
+
if do_classifier_free_guidance:
|
| 447 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 448 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 449 |
+
|
| 450 |
+
# perform clip guidance
|
| 451 |
+
if clip_guidance_scale > 0:
|
| 452 |
+
text_embeddings_for_guidance = (
|
| 453 |
+
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
|
| 454 |
+
)
|
| 455 |
+
noise_pred, latents = self.cond_fn(
|
| 456 |
+
latents,
|
| 457 |
+
t,
|
| 458 |
+
i,
|
| 459 |
+
text_embeddings_for_guidance,
|
| 460 |
+
noise_pred,
|
| 461 |
+
text_embeddings_clip,
|
| 462 |
+
clip_guidance_scale,
|
| 463 |
+
num_cutouts,
|
| 464 |
+
use_cutouts,
|
| 465 |
+
)
|
| 466 |
+
|
| 467 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 468 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 469 |
+
|
| 470 |
+
# scale and decode the image latents with vae
|
| 471 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 472 |
+
image = self.vae.decode(latents).sample
|
| 473 |
+
|
| 474 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 475 |
+
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
| 476 |
+
|
| 477 |
+
if output_type == "pil":
|
| 478 |
+
image = self.numpy_to_pil(image)
|
| 479 |
+
|
| 480 |
+
if not return_dict:
|
| 481 |
+
return (image, None)
|
| 482 |
+
|
| 483 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
|
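In the img2img pipeline above, `strength` controls how much of the scheduler's timestep schedule is actually run: `get_timesteps` keeps only the last `int(num_inference_steps * strength)` steps, so lower strength preserves more of the input image. A small, dependency-free sketch of that arithmetic follows; the step counts are illustrative.

```python
# Sketch of the strength -> effective step count arithmetic used by get_timesteps above.
def effective_steps(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start

print(effective_steps(50, 0.75))  # 37 -> only the last 37 of 50 steps are run
print(effective_steps(50, 1.0))   # 50 -> full denoising from pure noise
```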
v0.27.0/composable_stable_diffusion.py
ADDED
|
@@ -0,0 +1,527 @@
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Callable, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
from packaging import version
|
| 20 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 21 |
+
|
| 22 |
+
from diffusers import DiffusionPipeline
|
| 23 |
+
from diffusers.configuration_utils import FrozenDict
|
| 24 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 25 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 26 |
+
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
|
| 27 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 28 |
+
from diffusers.schedulers import (
|
| 29 |
+
DDIMScheduler,
|
| 30 |
+
DPMSolverMultistepScheduler,
|
| 31 |
+
EulerAncestralDiscreteScheduler,
|
| 32 |
+
EulerDiscreteScheduler,
|
| 33 |
+
LMSDiscreteScheduler,
|
| 34 |
+
PNDMScheduler,
|
| 35 |
+
)
|
| 36 |
+
from diffusers.utils import deprecate, logging
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
|
| 43 |
+
r"""
|
| 44 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 45 |
+
|
| 46 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 47 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
vae ([`AutoencoderKL`]):
|
| 51 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 52 |
+
text_encoder ([`CLIPTextModel`]):
|
| 53 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 54 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 55 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 56 |
+
tokenizer (`CLIPTokenizer`):
|
| 57 |
+
Tokenizer of class
|
| 58 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 59 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 60 |
+
scheduler ([`SchedulerMixin`]):
|
| 61 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 62 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 63 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 64 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 65 |
+
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
| 66 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 67 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 71 |
+
|
| 72 |
+
def __init__(
|
| 73 |
+
self,
|
| 74 |
+
vae: AutoencoderKL,
|
| 75 |
+
text_encoder: CLIPTextModel,
|
| 76 |
+
tokenizer: CLIPTokenizer,
|
| 77 |
+
unet: UNet2DConditionModel,
|
| 78 |
+
scheduler: Union[
|
| 79 |
+
DDIMScheduler,
|
| 80 |
+
PNDMScheduler,
|
| 81 |
+
LMSDiscreteScheduler,
|
| 82 |
+
EulerDiscreteScheduler,
|
| 83 |
+
EulerAncestralDiscreteScheduler,
|
| 84 |
+
DPMSolverMultistepScheduler,
|
| 85 |
+
],
|
| 86 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 87 |
+
feature_extractor: CLIPImageProcessor,
|
| 88 |
+
requires_safety_checker: bool = True,
|
| 89 |
+
):
|
| 90 |
+
super().__init__()
|
| 91 |
+
|
| 92 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 93 |
+
deprecation_message = (
|
| 94 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 95 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 96 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 97 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 98 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 99 |
+
" file"
|
| 100 |
+
)
|
| 101 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 102 |
+
new_config = dict(scheduler.config)
|
| 103 |
+
new_config["steps_offset"] = 1
|
| 104 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 105 |
+
|
| 106 |
+
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
| 107 |
+
deprecation_message = (
|
| 108 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 109 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 110 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 111 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 112 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 113 |
+
)
|
| 114 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 115 |
+
new_config = dict(scheduler.config)
|
| 116 |
+
new_config["clip_sample"] = False
|
| 117 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 118 |
+
|
| 119 |
+
if safety_checker is None and requires_safety_checker:
|
| 120 |
+
logger.warning(
|
| 121 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 122 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 123 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 124 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 125 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 126 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
if safety_checker is not None and feature_extractor is None:
|
| 130 |
+
raise ValueError(
|
| 131 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 132 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 136 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 137 |
+
) < version.parse("0.9.0.dev0")
|
| 138 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 139 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 140 |
+
deprecation_message = (
|
| 141 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 142 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 143 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 144 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 145 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 146 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 147 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 148 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 149 |
+
" the `unet/config.json` file"
|
| 150 |
+
)
|
| 151 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 152 |
+
new_config = dict(unet.config)
|
| 153 |
+
new_config["sample_size"] = 64
|
| 154 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 155 |
+
|
| 156 |
+
self.register_modules(
|
| 157 |
+
vae=vae,
|
| 158 |
+
text_encoder=text_encoder,
|
| 159 |
+
tokenizer=tokenizer,
|
| 160 |
+
unet=unet,
|
| 161 |
+
scheduler=scheduler,
|
| 162 |
+
safety_checker=safety_checker,
|
| 163 |
+
feature_extractor=feature_extractor,
|
| 164 |
+
)
|
| 165 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 166 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 167 |
+
|
| 168 |
+
def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
|
| 169 |
+
r"""
|
| 170 |
+
Encodes the prompt into text encoder hidden states.
|
| 171 |
+
|
| 172 |
+
Args:
|
| 173 |
+
prompt (`str` or `list(int)`):
|
| 174 |
+
prompt to be encoded
|
| 175 |
+
device: (`torch.device`):
|
| 176 |
+
torch device
|
| 177 |
+
num_images_per_prompt (`int`):
|
| 178 |
+
number of images that should be generated per prompt
|
| 179 |
+
do_classifier_free_guidance (`bool`):
|
| 180 |
+
whether to use classifier free guidance or not
|
| 181 |
+
negative_prompt (`str` or `List[str]`):
|
| 182 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 183 |
+
if `guidance_scale` is less than `1`).
|
| 184 |
+
"""
|
| 185 |
+
batch_size = len(prompt) if isinstance(prompt, list) else 1
|
| 186 |
+
|
| 187 |
+
text_inputs = self.tokenizer(
|
| 188 |
+
prompt,
|
| 189 |
+
padding="max_length",
|
| 190 |
+
max_length=self.tokenizer.model_max_length,
|
| 191 |
+
truncation=True,
|
| 192 |
+
return_tensors="pt",
|
| 193 |
+
)
|
| 194 |
+
text_input_ids = text_inputs.input_ids
|
| 195 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 196 |
+
|
| 197 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
|
| 198 |
+
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
|
| 199 |
+
logger.warning(
|
| 200 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 201 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 205 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 206 |
+
else:
|
| 207 |
+
attention_mask = None
|
| 208 |
+
|
| 209 |
+
text_embeddings = self.text_encoder(
|
| 210 |
+
text_input_ids.to(device),
|
| 211 |
+
attention_mask=attention_mask,
|
| 212 |
+
)
|
| 213 |
+
text_embeddings = text_embeddings[0]
|
| 214 |
+
|
| 215 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 216 |
+
bs_embed, seq_len, _ = text_embeddings.shape
|
| 217 |
+
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 218 |
+
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 219 |
+
|
| 220 |
+
# get unconditional embeddings for classifier free guidance
|
| 221 |
+
if do_classifier_free_guidance:
|
| 222 |
+
uncond_tokens: List[str]
|
| 223 |
+
if negative_prompt is None:
|
| 224 |
+
uncond_tokens = [""] * batch_size
|
| 225 |
+
elif type(prompt) is not type(negative_prompt):
|
| 226 |
+
raise TypeError(
|
| 227 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 228 |
+
f" {type(prompt)}."
|
| 229 |
+
)
|
| 230 |
+
elif isinstance(negative_prompt, str):
|
| 231 |
+
uncond_tokens = [negative_prompt]
|
| 232 |
+
elif batch_size != len(negative_prompt):
|
| 233 |
+
raise ValueError(
|
| 234 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 235 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 236 |
+
" the batch size of `prompt`."
|
| 237 |
+
)
|
| 238 |
+
else:
|
| 239 |
+
uncond_tokens = negative_prompt
|
| 240 |
+
|
| 241 |
+
max_length = text_input_ids.shape[-1]
|
| 242 |
+
uncond_input = self.tokenizer(
|
| 243 |
+
uncond_tokens,
|
| 244 |
+
padding="max_length",
|
| 245 |
+
max_length=max_length,
|
| 246 |
+
truncation=True,
|
| 247 |
+
return_tensors="pt",
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 251 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 252 |
+
else:
|
| 253 |
+
attention_mask = None
|
| 254 |
+
|
| 255 |
+
uncond_embeddings = self.text_encoder(
|
| 256 |
+
uncond_input.input_ids.to(device),
|
| 257 |
+
attention_mask=attention_mask,
|
| 258 |
+
)
|
| 259 |
+
uncond_embeddings = uncond_embeddings[0]
|
| 260 |
+
|
| 261 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 262 |
+
seq_len = uncond_embeddings.shape[1]
|
| 263 |
+
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 264 |
+
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 265 |
+
|
| 266 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 267 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 268 |
+
# to avoid doing two forward passes
|
| 269 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 270 |
+
|
| 271 |
+
return text_embeddings
|
| 272 |
+
|
| 273 |
+
def run_safety_checker(self, image, device, dtype):
|
| 274 |
+
if self.safety_checker is not None:
|
| 275 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
|
| 276 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 277 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 278 |
+
)
|
| 279 |
+
else:
|
| 280 |
+
has_nsfw_concept = None
|
| 281 |
+
return image, has_nsfw_concept
|
| 282 |
+
|
| 283 |
+
def decode_latents(self, latents):
|
| 284 |
+
latents = 1 / 0.18215 * latents
|
| 285 |
+
image = self.vae.decode(latents).sample
|
| 286 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 287 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 288 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 289 |
+
return image
|
| 290 |
+
|
| 291 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 292 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 293 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 294 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 295 |
+
# and should be between [0, 1]
|
| 296 |
+
|
| 297 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 298 |
+
extra_step_kwargs = {}
|
| 299 |
+
if accepts_eta:
|
| 300 |
+
extra_step_kwargs["eta"] = eta
|
| 301 |
+
|
| 302 |
+
# check if the scheduler accepts generator
|
| 303 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 304 |
+
if accepts_generator:
|
| 305 |
+
extra_step_kwargs["generator"] = generator
|
| 306 |
+
return extra_step_kwargs
|
| 307 |
+
|
| 308 |
+
def check_inputs(self, prompt, height, width, callback_steps):
|
| 309 |
+
if not isinstance(prompt, str) and not isinstance(prompt, list):
|
| 310 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 311 |
+
|
| 312 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 313 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 314 |
+
|
| 315 |
+
if (callback_steps is None) or (
|
| 316 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 317 |
+
):
|
| 318 |
+
raise ValueError(
|
| 319 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 320 |
+
f" {type(callback_steps)}."
|
| 321 |
+
)
|
| 322 |
+
|
| 323 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 324 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 325 |
+
if latents is None:
|
| 326 |
+
if device.type == "mps":
|
| 327 |
+
# randn does not work reproducibly on mps
|
| 328 |
+
latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
|
| 329 |
+
else:
|
| 330 |
+
latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
|
| 331 |
+
else:
|
| 332 |
+
if latents.shape != shape:
|
| 333 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
| 334 |
+
latents = latents.to(device)
|
| 335 |
+
|
| 336 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 337 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 338 |
+
return latents
|
| 339 |
+
|
| 340 |
+
@torch.no_grad()
|
| 341 |
+
def __call__(
|
| 342 |
+
self,
|
| 343 |
+
prompt: Union[str, List[str]],
|
| 344 |
+
height: Optional[int] = None,
|
| 345 |
+
width: Optional[int] = None,
|
| 346 |
+
num_inference_steps: int = 50,
|
| 347 |
+
guidance_scale: float = 7.5,
|
| 348 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 349 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 350 |
+
eta: float = 0.0,
|
| 351 |
+
generator: Optional[torch.Generator] = None,
|
| 352 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 353 |
+
output_type: Optional[str] = "pil",
|
| 354 |
+
return_dict: bool = True,
|
| 355 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 356 |
+
callback_steps: int = 1,
|
| 357 |
+
weights: Optional[str] = "",
|
| 358 |
+
):
|
| 359 |
+
r"""
|
| 360 |
+
Function invoked when calling the pipeline for generation.
|
| 361 |
+
|
| 362 |
+
Args:
|
| 363 |
+
prompt (`str` or `List[str]`):
|
| 364 |
+
The prompt or prompts to guide the image generation.
|
| 365 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 366 |
+
The height in pixels of the generated image.
|
| 367 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 368 |
+
The width in pixels of the generated image.
|
| 369 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 370 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 371 |
+
expense of slower inference.
|
| 372 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 373 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 374 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 375 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 376 |
+
1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
|
| 377 |
+
usually at the expense of lower image quality.
|
| 378 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 379 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 380 |
+
if `guidance_scale` is less than `1`).
|
| 381 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 382 |
+
The number of images to generate per prompt.
|
| 383 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 384 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 385 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 386 |
+
generator (`torch.Generator`, *optional*):
|
| 387 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 388 |
+
deterministic.
|
| 389 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 390 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 391 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 392 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 393 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 394 |
+
The output format of the generated image. Choose between
|
| 395 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 396 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 397 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 398 |
+
plain tuple.
|
| 399 |
+
callback (`Callable`, *optional*):
|
| 400 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 401 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 402 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 403 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 404 |
+
called at every step.
|
| 405 |
+
|
| 406 |
+
Returns:
|
| 407 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 408 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 409 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 410 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 411 |
+
(nsfw) content, according to the `safety_checker`.
|
| 412 |
+
"""
|
| 413 |
+
# 0. Default height and width to unet
|
| 414 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 415 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 416 |
+
|
| 417 |
+
# 1. Check inputs. Raise error if not correct
|
| 418 |
+
self.check_inputs(prompt, height, width, callback_steps)
|
| 419 |
+
|
| 420 |
+
# 2. Define call parameters
|
| 421 |
+
batch_size = 1 if isinstance(prompt, str) else len(prompt)
|
| 422 |
+
device = self._execution_device
|
| 423 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 424 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 425 |
+
# corresponds to doing no classifier free guidance.
|
| 426 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 427 |
+
|
| 428 |
+
if "|" in prompt:
|
| 429 |
+
prompt = [x.strip() for x in prompt.split("|")]
|
| 430 |
+
print(f"composing {prompt}...")
|
| 431 |
+
|
| 432 |
+
if not weights:
|
| 433 |
+
# specify weights for prompts (excluding the unconditional score)
|
| 434 |
+
print("using equal positive weights (conjunction) for all prompts...")
|
| 435 |
+
weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
|
| 436 |
+
else:
|
| 437 |
+
# set prompt weight for each
|
| 438 |
+
num_prompts = len(prompt) if isinstance(prompt, list) else 1
|
| 439 |
+
weights = [float(w.strip()) for w in weights.split("|")]
|
| 440 |
+
# guidance scale as the default
|
| 441 |
+
if len(weights) < num_prompts:
|
| 442 |
+
weights.append(guidance_scale)
|
| 443 |
+
else:
|
| 444 |
+
weights = weights[:num_prompts]
|
| 445 |
+
assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
|
| 446 |
+
weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
|
| 447 |
+
else:
|
| 448 |
+
weights = guidance_scale
|
| 449 |
+
|
| 450 |
+
# 3. Encode input prompt
|
| 451 |
+
text_embeddings = self._encode_prompt(
|
| 452 |
+
prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
# 4. Prepare timesteps
|
| 456 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 457 |
+
timesteps = self.scheduler.timesteps
|
| 458 |
+
|
| 459 |
+
# 5. Prepare latent variables
|
| 460 |
+
num_channels_latents = self.unet.config.in_channels
|
| 461 |
+
latents = self.prepare_latents(
|
| 462 |
+
batch_size * num_images_per_prompt,
|
| 463 |
+
num_channels_latents,
|
| 464 |
+
height,
|
| 465 |
+
width,
|
| 466 |
+
text_embeddings.dtype,
|
| 467 |
+
device,
|
| 468 |
+
generator,
|
| 469 |
+
latents,
|
| 470 |
+
)
|
| 471 |
+
|
| 472 |
+
# composable diffusion
|
| 473 |
+
if isinstance(prompt, list) and batch_size == 1:
|
| 474 |
+
# remove extra unconditional embedding
|
| 475 |
+
# N = one unconditional embed + conditional embeds
|
| 476 |
+
text_embeddings = text_embeddings[len(prompt) - 1 :]
|
| 477 |
+
|
| 478 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 479 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 480 |
+
|
| 481 |
+
# 7. Denoising loop
|
| 482 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 483 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 484 |
+
for i, t in enumerate(timesteps):
|
| 485 |
+
# expand the latents if we are doing classifier free guidance
|
| 486 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 487 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 488 |
+
|
| 489 |
+
# predict the noise residual
|
| 490 |
+
noise_pred = []
|
| 491 |
+
for j in range(text_embeddings.shape[0]):
|
| 492 |
+
noise_pred.append(
|
| 493 |
+
self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample
|
| 494 |
+
)
|
| 495 |
+
noise_pred = torch.cat(noise_pred, dim=0)
|
| 496 |
+
|
| 497 |
+
# perform guidance
|
| 498 |
+
if do_classifier_free_guidance:
|
| 499 |
+
noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:]
|
| 500 |
+
noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum(
|
| 501 |
+
dim=0, keepdims=True
|
| 502 |
+
)
|
| 503 |
+
|
| 504 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 505 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 506 |
+
|
| 507 |
+
# call the callback, if provided
|
| 508 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 509 |
+
progress_bar.update()
|
| 510 |
+
if callback is not None and i % callback_steps == 0:
|
| 511 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 512 |
+
callback(step_idx, t, latents)
|
| 513 |
+
|
| 514 |
+
# 8. Post-processing
|
| 515 |
+
image = self.decode_latents(latents)
|
| 516 |
+
|
| 517 |
+
# 9. Run safety checker
|
| 518 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
|
| 519 |
+
|
| 520 |
+
# 10. Convert to PIL
|
| 521 |
+
if output_type == "pil":
|
| 522 |
+
image = self.numpy_to_pil(image)
|
| 523 |
+
|
| 524 |
+
if not return_dict:
|
| 525 |
+
return (image, has_nsfw_concept)
|
| 526 |
+
|
| 527 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
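# --- Hedged usage sketch (not part of the original file) ---
# A minimal sketch of driving the "|"-separated prompt composition implemented above. It
# assumes this file is loaded as a Diffusers community pipeline via the `custom_pipeline`
# argument of `DiffusionPipeline.from_pretrained`; the checkpoint id, the community-pipeline
# name and the weights string are illustrative assumptions, not prescribed by this file.
if __name__ == "__main__":
    import torch

    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        custom_pipeline="composable_stable_diffusion",  # assumed name of this community pipeline
        torch_dtype=torch.float16,
    ).to("cuda")

    # Prompts separated by "|" are denoised separately; their (conditional - unconditional)
    # scores are combined with the per-prompt weights before each scheduler step.
    image = pipe(
        prompt="a red car | a snowy mountain road",
        weights="7.5 | 7.5",
        num_inference_steps=50,
    ).images[0]
    image.save("composed_prompts.png")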
|
v0.27.0/ddim_noise_comparative_analysis.py
ADDED
|
@@ -0,0 +1,190 @@
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import List, Optional, Tuple, Union
|
| 16 |
+
|
| 17 |
+
import PIL.Image
|
| 18 |
+
import torch
|
| 19 |
+
from torchvision import transforms
|
| 20 |
+
|
| 21 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
|
| 22 |
+
from diffusers.schedulers import DDIMScheduler
|
| 23 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
trans = transforms.Compose(
|
| 27 |
+
[
|
| 28 |
+
transforms.Resize((256, 256)),
|
| 29 |
+
transforms.ToTensor(),
|
| 30 |
+
transforms.Normalize([0.5], [0.5]),
|
| 31 |
+
]
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def preprocess(image):
|
| 36 |
+
if isinstance(image, torch.Tensor):
|
| 37 |
+
return image
|
| 38 |
+
elif isinstance(image, PIL.Image.Image):
|
| 39 |
+
image = [image]
|
| 40 |
+
|
| 41 |
+
image = [trans(img.convert("RGB")) for img in image]
|
| 42 |
+
image = torch.stack(image)
|
| 43 |
+
return image
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
|
| 47 |
+
r"""
|
| 48 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 49 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 50 |
+
|
| 51 |
+
Parameters:
|
| 52 |
+
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
|
| 53 |
+
scheduler ([`SchedulerMixin`]):
|
| 54 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
|
| 55 |
+
[`DDPMScheduler`], or [`DDIMScheduler`].
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
def __init__(self, unet, scheduler):
|
| 59 |
+
super().__init__()
|
| 60 |
+
|
| 61 |
+
# make sure scheduler can always be converted to DDIM
|
| 62 |
+
scheduler = DDIMScheduler.from_config(scheduler.config)
|
| 63 |
+
|
| 64 |
+
self.register_modules(unet=unet, scheduler=scheduler)
|
| 65 |
+
|
| 66 |
+
def check_inputs(self, strength):
|
| 67 |
+
if strength < 0 or strength > 1:
|
| 68 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 69 |
+
|
| 70 |
+
def get_timesteps(self, num_inference_steps, strength, device):
|
| 71 |
+
# get the original timestep using init_timestep
|
| 72 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 73 |
+
|
| 74 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 75 |
+
timesteps = self.scheduler.timesteps[t_start:]
|
| 76 |
+
|
| 77 |
+
return timesteps, num_inference_steps - t_start
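# Worked example (comments only): with num_inference_steps=50 and strength=0.8,
# init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 50 - 40 = 10, so the returned
# schedule keeps the last 40 of the 50 timesteps and the effective number of inference
# steps becomes 40. With strength=1.0 the full schedule is kept.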
|
| 78 |
+
|
| 79 |
+
def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
|
| 80 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 81 |
+
raise ValueError(
|
| 82 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
init_latents = image.to(device=device, dtype=dtype)
|
| 86 |
+
|
| 87 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 88 |
+
raise ValueError(
|
| 89 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 90 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
shape = init_latents.shape
|
| 94 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 95 |
+
|
| 96 |
+
# get latents
|
| 97 |
+
print("add noise to latents at timestep", timestep)
|
| 98 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 99 |
+
latents = init_latents
|
| 100 |
+
|
| 101 |
+
return latents
|
| 102 |
+
|
| 103 |
+
@torch.no_grad()
|
| 104 |
+
def __call__(
|
| 105 |
+
self,
|
| 106 |
+
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 107 |
+
strength: float = 0.8,
|
| 108 |
+
batch_size: int = 1,
|
| 109 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 110 |
+
eta: float = 0.0,
|
| 111 |
+
num_inference_steps: int = 50,
|
| 112 |
+
use_clipped_model_output: Optional[bool] = None,
|
| 113 |
+
output_type: Optional[str] = "pil",
|
| 114 |
+
return_dict: bool = True,
|
| 115 |
+
) -> Union[ImagePipelineOutput, Tuple]:
|
| 116 |
+
r"""
|
| 117 |
+
Args:
|
| 118 |
+
image (`torch.FloatTensor` or `PIL.Image.Image`):
|
| 119 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 120 |
+
process.
|
| 121 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 122 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
|
| 123 |
+
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
|
| 124 |
+
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
|
| 125 |
+
be maximum and the denoising process will run for the full number of iterations specified in
|
| 126 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 127 |
+
batch_size (`int`, *optional*, defaults to 1):
|
| 128 |
+
The number of images to generate.
|
| 129 |
+
generator (`torch.Generator`, *optional*):
|
| 130 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 131 |
+
to make generation deterministic.
|
| 132 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 133 |
+
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
|
| 134 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 135 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 136 |
+
expense of slower inference.
|
| 137 |
+
use_clipped_model_output (`bool`, *optional*, defaults to `None`):
|
| 138 |
+
if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
|
| 139 |
+
downstream to the scheduler. So use `None` for schedulers which don't support this argument.
|
| 140 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 141 |
+
The output format of the generated image. Choose between
|
| 142 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 143 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 144 |
+
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
|
| 145 |
+
|
| 146 |
+
Returns:
|
| 147 |
+
[`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
|
| 148 |
+
True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 149 |
+
"""
|
| 150 |
+
# 1. Check inputs. Raise error if not correct
|
| 151 |
+
self.check_inputs(strength)
|
| 152 |
+
|
| 153 |
+
# 2. Preprocess image
|
| 154 |
+
image = preprocess(image)
|
| 155 |
+
|
| 156 |
+
# 3. set timesteps
|
| 157 |
+
self.scheduler.set_timesteps(num_inference_steps, device=self.device)
|
| 158 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
|
| 159 |
+
latent_timestep = timesteps[:1].repeat(batch_size)
|
| 160 |
+
|
| 161 |
+
# 4. Prepare latent variables
|
| 162 |
+
latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
|
| 163 |
+
image = latents
|
| 164 |
+
|
| 165 |
+
# 5. Denoising loop
|
| 166 |
+
for t in self.progress_bar(timesteps):
|
| 167 |
+
# 1. predict noise model_output
|
| 168 |
+
model_output = self.unet(image, t).sample
|
| 169 |
+
|
| 170 |
+
# 2. predict previous mean of image x_t-1 and add variance depending on eta
|
| 171 |
+
# eta corresponds to η in paper and should be between [0, 1]
|
| 172 |
+
# do x_t -> x_t-1
|
| 173 |
+
image = self.scheduler.step(
|
| 174 |
+
model_output,
|
| 175 |
+
t,
|
| 176 |
+
image,
|
| 177 |
+
eta=eta,
|
| 178 |
+
use_clipped_model_output=use_clipped_model_output,
|
| 179 |
+
generator=generator,
|
| 180 |
+
).prev_sample
|
| 181 |
+
|
| 182 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 183 |
+
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
| 184 |
+
if output_type == "pil":
|
| 185 |
+
image = self.numpy_to_pil(image)
|
| 186 |
+
|
| 187 |
+
if not return_dict:
|
| 188 |
+
return (image, latent_timestep.item())
|
| 189 |
+
|
| 190 |
+
return ImagePipelineOutput(images=image)
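# --- Hedged usage sketch (not part of the original file) ---
# A minimal way to exercise the comparative analysis: run the same input image through the
# pipeline at several `strength` values and compare how much of the original survives.
# The checkpoint id and the local "sample.png" are illustrative assumptions; any scheduler
# can be passed, since __init__ converts its config to a DDIMScheduler.
if __name__ == "__main__":
    from PIL import Image

    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
    scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
    pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler).to("cuda")

    src = Image.open("sample.png")  # hypothetical local 256x256 RGB image
    for strength in (0.2, 0.5, 0.8):
        # return_dict=False yields (list of PIL images, timestep at which noise was added)
        result, timestep = pipe(image=src, strength=strength, num_inference_steps=50, return_dict=False)
        result[0].save(f"ddim_noise_analysis_strength_{strength}_t{timestep}.png")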
|
v0.27.0/dps_pipeline.py
ADDED
|
@@ -0,0 +1,466 @@
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from math import pi
|
| 17 |
+
from typing import Callable, List, Optional, Tuple, Union
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from PIL import Image
|
| 22 |
+
|
| 23 |
+
from diffusers import DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DModel
|
| 24 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class DPSPipeline(DiffusionPipeline):
|
| 28 |
+
r"""
|
| 29 |
+
Pipeline for Diffusion Posterior Sampling.
|
| 30 |
+
|
| 31 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 32 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 33 |
+
|
| 34 |
+
Parameters:
|
| 35 |
+
unet ([`UNet2DModel`]):
|
| 36 |
+
A `UNet2DModel` to denoise the encoded image latents.
|
| 37 |
+
scheduler ([`SchedulerMixin`]):
|
| 38 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
|
| 39 |
+
[`DDPMScheduler`], or [`DDIMScheduler`].
|
| 40 |
+
"""
|
| 41 |
+
|
| 42 |
+
model_cpu_offload_seq = "unet"
|
| 43 |
+
|
| 44 |
+
def __init__(self, unet, scheduler):
|
| 45 |
+
super().__init__()
|
| 46 |
+
self.register_modules(unet=unet, scheduler=scheduler)
|
| 47 |
+
|
| 48 |
+
@torch.no_grad()
|
| 49 |
+
def __call__(
|
| 50 |
+
self,
|
| 51 |
+
measurement: torch.Tensor,
|
| 52 |
+
operator: torch.nn.Module,
|
| 53 |
+
loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
|
| 54 |
+
batch_size: int = 1,
|
| 55 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 56 |
+
num_inference_steps: int = 1000,
|
| 57 |
+
output_type: Optional[str] = "pil",
|
| 58 |
+
return_dict: bool = True,
|
| 59 |
+
zeta: float = 0.3,
|
| 60 |
+
) -> Union[ImagePipelineOutput, Tuple]:
|
| 61 |
+
r"""
|
| 62 |
+
The call function to the pipeline for generation.
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
measurement (`torch.Tensor`, *required*):
|
| 66 |
+
A `torch.Tensor`, the corrupted image.
|
| 67 |
+
operator (`torch.nn.Module`, *required*):
|
| 68 |
+
A `torch.nn.Module`, the operator generating the corrupted image.
|
| 69 |
+
loss_fn (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *required*):
|
| 70 |
+
A `Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, the loss function used
|
| 71 |
+
between the measurements; for most cases using RMSE is fine.
|
| 72 |
+
batch_size (`int`, *optional*, defaults to 1):
|
| 73 |
+
The number of images to generate.
|
| 74 |
+
generator (`torch.Generator`, *optional*):
|
| 75 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 76 |
+
generation deterministic.
|
| 77 |
+
num_inference_steps (`int`, *optional*, defaults to 1000):
|
| 78 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 79 |
+
expense of slower inference.
|
| 80 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 81 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 82 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 83 |
+
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
|
| 84 |
+
|
| 85 |
+
Example:
|
| 86 |
+
|
| 87 |
+
```py
|
| 88 |
+
>>> from diffusers import DDPMPipeline
|
| 89 |
+
|
| 90 |
+
>>> # load model and scheduler
|
| 91 |
+
>>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
|
| 92 |
+
|
| 93 |
+
>>> # run pipeline in inference (sample random noise and denoise)
|
| 94 |
+
>>> image = pipe().images[0]
|
| 95 |
+
|
| 96 |
+
>>> # save image
|
| 97 |
+
>>> image.save("ddpm_generated_image.png")
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
Returns:
|
| 101 |
+
[`~pipelines.ImagePipelineOutput`] or `tuple`:
|
| 102 |
+
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
|
| 103 |
+
returned where the first element is a list with the generated images
|
| 104 |
+
"""
|
| 105 |
+
# Sample gaussian noise to begin loop
|
| 106 |
+
if isinstance(self.unet.config.sample_size, int):
|
| 107 |
+
image_shape = (
|
| 108 |
+
batch_size,
|
| 109 |
+
self.unet.config.in_channels,
|
| 110 |
+
self.unet.config.sample_size,
|
| 111 |
+
self.unet.config.sample_size,
|
| 112 |
+
)
|
| 113 |
+
else:
|
| 114 |
+
image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
|
| 115 |
+
|
| 116 |
+
if self.device.type == "mps":
|
| 117 |
+
# randn does not work reproducibly on mps
|
| 118 |
+
image = randn_tensor(image_shape, generator=generator)
|
| 119 |
+
image = image.to(self.device)
|
| 120 |
+
else:
|
| 121 |
+
image = randn_tensor(image_shape, generator=generator, device=self.device)
|
| 122 |
+
|
| 123 |
+
# set step values
|
| 124 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 125 |
+
|
| 126 |
+
for t in self.progress_bar(self.scheduler.timesteps):
|
| 127 |
+
with torch.enable_grad():
|
| 128 |
+
# 1. predict noise model_output
|
| 129 |
+
image = image.requires_grad_()
|
| 130 |
+
model_output = self.unet(image, t).sample
|
| 131 |
+
|
| 132 |
+
# 2. compute previous image x'_{t-1} and original prediction x0_{t}
|
| 133 |
+
scheduler_out = self.scheduler.step(model_output, t, image, generator=generator)
|
| 134 |
+
image_pred, origi_pred = scheduler_out.prev_sample, scheduler_out.pred_original_sample
|
| 135 |
+
|
| 136 |
+
# 3. compute y'_t = f(x0_{t})
|
| 137 |
+
measurement_pred = operator(origi_pred)
|
| 138 |
+
|
| 139 |
+
# 4. compute loss = d(y, y'_t-1)
|
| 140 |
+
loss = loss_fn(measurement, measurement_pred)
|
| 141 |
+
loss.backward()
|
| 142 |
+
|
| 143 |
+
print("distance: {0:.4f}".format(loss.item()))
|
| 144 |
+
|
| 145 |
+
with torch.no_grad():
|
| 146 |
+
image_pred = image_pred - zeta * image.grad
|
| 147 |
+
image = image_pred.detach()
|
| 148 |
+
|
| 149 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 150 |
+
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
| 151 |
+
if output_type == "pil":
|
| 152 |
+
image = self.numpy_to_pil(image)
|
| 153 |
+
|
| 154 |
+
if not return_dict:
|
| 155 |
+
return (image,)
|
| 156 |
+
|
| 157 |
+
return ImagePipelineOutput(images=image)
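# Note on the loop above (comments only): each iteration takes the ordinary reverse-diffusion
# step to obtain x'_{t-1} and the scheduler's estimate of x0, pushes that x0 estimate through
# the measurement operator f, and then corrects x'_{t-1} with a gradient step,
#     x_{t-1} = x'_{t-1} - zeta * grad_{x_t} loss(y, f(x0_hat)),
# which is why the denoising loop runs under torch.enable_grad() even though the pipeline
# call itself is decorated with torch.no_grad().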
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
if __name__ == "__main__":
|
| 161 |
+
import scipy
|
| 162 |
+
from torch import nn
|
| 163 |
+
from torchvision.utils import save_image
|
| 164 |
+
|
| 165 |
+
# defining the operators f(.) of y = f(x)
|
| 166 |
+
# super-resolution operator
|
| 167 |
+
class SuperResolutionOperator(nn.Module):
|
| 168 |
+
def __init__(self, in_shape, scale_factor):
|
| 169 |
+
super().__init__()
|
| 170 |
+
|
| 171 |
+
# Resizer local class, do not use outside the SR operator class
|
| 172 |
+
class Resizer(nn.Module):
|
| 173 |
+
def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True):
|
| 174 |
+
super(Resizer, self).__init__()
|
| 175 |
+
|
| 176 |
+
# First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa
|
| 177 |
+
scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)
|
| 178 |
+
|
| 179 |
+
# Choose interpolation method, each method has the matching kernel size
|
| 180 |
+
def cubic(x):
|
| 181 |
+
absx = np.abs(x)
|
| 182 |
+
absx2 = absx**2
|
| 183 |
+
absx3 = absx**3
|
| 184 |
+
return (1.5 * absx3 - 2.5 * absx2 + 1) * (absx <= 1) + (
|
| 185 |
+
-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
|
| 186 |
+
) * ((1 < absx) & (absx <= 2))
|
| 187 |
+
|
| 188 |
+
def lanczos2(x):
|
| 189 |
+
return (
|
| 190 |
+
(np.sin(pi * x) * np.sin(pi * x / 2) + np.finfo(np.float32).eps)
|
| 191 |
+
/ ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps)
|
| 192 |
+
) * (abs(x) < 2)
|
| 193 |
+
|
| 194 |
+
def box(x):
|
| 195 |
+
return ((-0.5 <= x) & (x < 0.5)) * 1.0
|
| 196 |
+
|
| 197 |
+
def lanczos3(x):
|
| 198 |
+
return (
|
| 199 |
+
(np.sin(pi * x) * np.sin(pi * x / 3) + np.finfo(np.float32).eps)
|
| 200 |
+
/ ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps)
|
| 201 |
+
) * (abs(x) < 3)
|
| 202 |
+
|
| 203 |
+
def linear(x):
|
| 204 |
+
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
|
| 205 |
+
|
| 206 |
+
method, kernel_width = {
|
| 207 |
+
"cubic": (cubic, 4.0),
|
| 208 |
+
"lanczos2": (lanczos2, 4.0),
|
| 209 |
+
"lanczos3": (lanczos3, 6.0),
|
| 210 |
+
"box": (box, 1.0),
|
| 211 |
+
"linear": (linear, 2.0),
|
| 212 |
+
None: (cubic, 4.0), # set default interpolation method as cubic
|
| 213 |
+
}.get(kernel)
|
| 214 |
+
|
| 215 |
+
# Antialiasing is only used when downscaling
|
| 216 |
+
antialiasing *= np.any(np.array(scale_factor) < 1)
|
| 217 |
+
|
| 218 |
+
# Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient
|
| 219 |
+
sorted_dims = np.argsort(np.array(scale_factor))
|
| 220 |
+
self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]
|
| 221 |
+
|
| 222 |
+
# Iterate over dimensions to calculate local weights for resizing and resize each time in one direction
|
| 223 |
+
field_of_view_list = []
|
| 224 |
+
weights_list = []
|
| 225 |
+
for dim in self.sorted_dims:
|
| 226 |
+
# for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the
|
| 227 |
+
# weights that multiply the values there to get its result.
|
| 228 |
+
weights, field_of_view = self.contributions(
|
| 229 |
+
in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
# convert to torch tensor
|
| 233 |
+
weights = torch.tensor(weights.T, dtype=torch.float32)
|
| 234 |
+
|
| 235 |
+
# We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for
|
| 236 |
+
# tmp_im[field_of_view.T], (bsxfun style)
|
| 237 |
+
weights_list.append(
|
| 238 |
+
nn.Parameter(
|
| 239 |
+
torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]),
|
| 240 |
+
requires_grad=False,
|
| 241 |
+
)
|
| 242 |
+
)
|
| 243 |
+
field_of_view_list.append(
|
| 244 |
+
nn.Parameter(
|
| 245 |
+
torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long), requires_grad=False
|
| 246 |
+
)
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
self.field_of_view = nn.ParameterList(field_of_view_list)
|
| 250 |
+
self.weights = nn.ParameterList(weights_list)
|
| 251 |
+
|
| 252 |
+
def forward(self, in_tensor):
|
| 253 |
+
x = in_tensor
|
| 254 |
+
|
| 255 |
+
# Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim
|
| 256 |
+
for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):
|
| 257 |
+
# To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize
|
| 258 |
+
x = torch.transpose(x, dim, 0)
|
| 259 |
+
|
| 260 |
+
# This is a bit of a complicated multiplication: x[field_of_view.T] is a tensor of order image_dims+1.
|
| 261 |
+
# for each pixel in the output-image it matches the positions that influence it from the input image (along 1 dim
|
| 262 |
+
# only, this is why it only adds 1 dim to the shape). We then multiply, for each pixel, its set of positions with
|
| 263 |
+
# the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:
|
| 264 |
+
# matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the
|
| 265 |
+
# same number)
|
| 266 |
+
x = torch.sum(x[fov] * w, dim=0)
|
| 267 |
+
|
| 268 |
+
# Finally we swap back the axes to the original order
|
| 269 |
+
x = torch.transpose(x, dim, 0)
|
| 270 |
+
|
| 271 |
+
return x
|
| 272 |
+
|
| 273 |
+
def fix_scale_and_size(self, input_shape, output_shape, scale_factor):
|
| 274 |
+
# First fix the scale-factor (if given) to the standardized form the function expects (a list of scale factors of the
|
| 275 |
+
# same size as the number of input dimensions)
|
| 276 |
+
if scale_factor is not None:
|
| 277 |
+
# By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.
|
| 278 |
+
if np.isscalar(scale_factor) and len(input_shape) > 1:
|
| 279 |
+
scale_factor = [scale_factor, scale_factor]
|
| 280 |
+
|
| 281 |
+
# We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales
|
| 282 |
+
scale_factor = list(scale_factor)
|
| 283 |
+
scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor
|
| 284 |
+
|
| 285 |
+
# Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size
|
| 286 |
+
# to all the unspecified dimensions
|
| 287 |
+
if output_shape is not None:
|
| 288 |
+
output_shape = list(input_shape[len(output_shape) :]) + list(np.uint(np.array(output_shape)))
|
| 289 |
+
|
| 290 |
+
# Dealing with the case of a non-given scale-factor, calculating according to output-shape. note that this is
|
| 291 |
+
# sub-optimal, because there can be different scales to the same output-shape.
|
| 292 |
+
if scale_factor is None:
|
| 293 |
+
scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape)
|
| 294 |
+
|
| 295 |
+
# Dealing with missing output-shape. calculating according to scale-factor
|
| 296 |
+
if output_shape is None:
|
| 297 |
+
output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))
|
| 298 |
+
|
| 299 |
+
return scale_factor, output_shape
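# Worked example (comments only): for input_shape=(1, 3, 256, 256) and a scalar
# scale_factor=0.25, the scale is first duplicated to [0.25, 0.25] and then left-padded
# with ones to [1, 1, 0.25, 0.25]; since no output_shape was given, it is derived as
# ceil(shape * scale) = (1, 3, 64, 64).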
|
| 300 |
+
|
| 301 |
+
def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):
|
| 302 |
+
# This function calculates a set of 'filters' and a set of field_of_view that will later on be applied
|
| 303 |
+
# such that each position from the field_of_view will be multiplied with a matching filter from the
|
| 304 |
+
# 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers
|
| 305 |
+
# around it. This is only done for one dimension of the image.
|
| 306 |
+
|
| 307 |
+
# When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of
|
| 308 |
+
# 1/sf. This means the filtering acts more like a low-pass filter.
|
| 309 |
+
fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel
|
| 310 |
+
kernel_width *= 1.0 / scale if antialiasing else 1.0
|
| 311 |
+
|
| 312 |
+
# These are the coordinates of the output image
|
| 313 |
+
out_coordinates = np.arange(1, out_length + 1)
|
| 314 |
+
|
| 315 |
+
# since both scale-factor and output size can be provided simultaneously, preserving the center of the image requires shifting
|
| 316 |
+
# the output coordinates. The deviation is because out_length doesn't necessarily equal in_length*scale.
|
| 317 |
+
# To keep the center we need to subtract half of this deviation so that we get equal margins on both sides and the center is preserved.
|
| 318 |
+
shifted_out_coordinates = out_coordinates - (out_length - in_length * scale) / 2
|
| 319 |
+
|
| 320 |
+
# These are the matching positions of the output-coordinates on the input image coordinates.
|
| 321 |
+
# Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:
|
| 322 |
+
# [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel.
|
| 323 |
+
# The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to
|
| 324 |
+
# the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big
|
| 325 |
+
# one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).
|
| 326 |
+
# So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is
|
| 327 |
+
# at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:
|
| 328 |
+
# (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)
|
| 329 |
+
match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)
|
| 330 |
+
|
| 331 |
+
# This is the left boundary to start multiplying the filter from, it depends on the size of the filter
|
| 332 |
+
left_boundary = np.floor(match_coordinates - kernel_width / 2)
|
| 333 |
+
|
| 334 |
+
# Kernel width needs to be enlarged because when the coverage has sub-pixel borders, it must 'see' the pixel centers
|
| 335 |
+
# of the pixels it only covered a part of. So we add one pixel at each side to consider (weights can zero them out)
|
| 336 |
+
expanded_kernel_width = np.ceil(kernel_width) + 2
|
| 337 |
+
|
| 338 |
+
# Determine a set of field_of_view for each output position, these are the pixels in the input image
|
| 339 |
+
# that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the
|
| 340 |
+
# vertical dim is the pixels it 'sees' (kernel_size + 2)
|
| 341 |
+
field_of_view = np.squeeze(
|
| 342 |
+
np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1)
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
# Assign weight to each pixel in the field of view. A matrix whos horizontal dim is the output pixels and the
|
| 346 |
+
# vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
|
| 347 |
+
# 'field_of_view')
|
| 348 |
+
weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
|
| 349 |
+
|
| 350 |
+
# Normalize weights to sum up to 1. be careful from dividing by 0
|
| 351 |
+
sum_weights = np.sum(weights, axis=1)
|
| 352 |
+
sum_weights[sum_weights == 0] = 1.0
|
| 353 |
+
weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)
|
| 354 |
+
|
| 355 |
+
# We use this mirror structure as a trick for reflection padding at the boundaries
|
| 356 |
+
mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))
|
| 357 |
+
field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]
|
| 358 |
+
|
| 359 |
+
# Get rid of weights and pixel positions that are of zero weight
|
| 360 |
+
non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))
|
| 361 |
+
weights = np.squeeze(weights[:, non_zero_out_pixels])
|
| 362 |
+
field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])
|
| 363 |
+
|
| 364 |
+
# Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size
|
| 365 |
+
return weights, field_of_view
|
| 366 |
+
|
| 367 |
+
self.down_sample = Resizer(in_shape, 1 / scale_factor)
|
| 368 |
+
for param in self.parameters():
|
| 369 |
+
param.requires_grad = False
|
| 370 |
+
|
| 371 |
+
def forward(self, data, **kwargs):
|
| 372 |
+
return self.down_sample(data)
|
| 373 |
+
|
| 374 |
+
# Gaussian blurring operator
|
| 375 |
+
class GaussialBlurOperator(nn.Module):
|
| 376 |
+
def __init__(self, kernel_size, intensity):
|
| 377 |
+
super().__init__()
|
| 378 |
+
|
| 379 |
+
class Blurkernel(nn.Module):
|
| 380 |
+
def __init__(self, blur_type="gaussian", kernel_size=31, std=3.0):
|
| 381 |
+
super().__init__()
|
| 382 |
+
self.blur_type = blur_type
|
| 383 |
+
self.kernel_size = kernel_size
|
| 384 |
+
self.std = std
|
| 385 |
+
self.seq = nn.Sequential(
|
| 386 |
+
nn.ReflectionPad2d(self.kernel_size // 2),
|
| 387 |
+
nn.Conv2d(3, 3, self.kernel_size, stride=1, padding=0, bias=False, groups=3),
|
| 388 |
+
)
|
| 389 |
+
self.weights_init()
|
| 390 |
+
|
| 391 |
+
def forward(self, x):
|
| 392 |
+
return self.seq(x)
|
| 393 |
+
|
| 394 |
+
def weights_init(self):
|
| 395 |
+
if self.blur_type == "gaussian":
|
| 396 |
+
n = np.zeros((self.kernel_size, self.kernel_size))
|
| 397 |
+
n[self.kernel_size // 2, self.kernel_size // 2] = 1
|
| 398 |
+
k = scipy.ndimage.gaussian_filter(n, sigma=self.std)
|
| 399 |
+
k = torch.from_numpy(k)
|
| 400 |
+
self.k = k
|
| 401 |
+
for name, f in self.named_parameters():
|
| 402 |
+
f.data.copy_(k)
|
| 403 |
+
|
| 404 |
+
def update_weights(self, k):
|
| 405 |
+
if not torch.is_tensor(k):
|
| 406 |
+
k = torch.from_numpy(k)
|
| 407 |
+
for name, f in self.named_parameters():
|
| 408 |
+
f.data.copy_(k)
|
| 409 |
+
|
| 410 |
+
def get_kernel(self):
|
| 411 |
+
return self.k
|
| 412 |
+
|
| 413 |
+
self.kernel_size = kernel_size
|
| 414 |
+
self.conv = Blurkernel(blur_type="gaussian", kernel_size=kernel_size, std=intensity)
|
| 415 |
+
self.kernel = self.conv.get_kernel()
|
| 416 |
+
self.conv.update_weights(self.kernel.type(torch.float32))
|
| 417 |
+
|
| 418 |
+
for param in self.parameters():
|
| 419 |
+
param.requires_grad = False
|
| 420 |
+
|
| 421 |
+
def forward(self, data, **kwargs):
|
| 422 |
+
return self.conv(data)
|
| 423 |
+
|
| 424 |
+
def transpose(self, data, **kwargs):
|
| 425 |
+
return data
|
| 426 |
+
|
| 427 |
+
def get_kernel(self):
|
| 428 |
+
return self.kernel.view(1, 1, self.kernel_size, self.kernel_size)
|
| 429 |
+
|
| 430 |
+
# assuming the forward process y = f(x) is polluted by Gaussian noise, use l2 norm
|
| 431 |
+
def RMSELoss(yhat, y):
|
| 432 |
+
return torch.sqrt(torch.sum((yhat - y) ** 2))  # l2 norm of the residual (sum, not mean, of squared errors)
|
| 433 |
+
|
| 434 |
+
# set up source image
|
| 435 |
+
src = Image.open("sample.png")
|
| 436 |
+
# read image into [1,3,H,W]
|
| 437 |
+
src = torch.from_numpy(np.array(src, dtype=np.float32)).permute(2, 0, 1)[None]
|
| 438 |
+
# normalize image to [-1,1]
|
| 439 |
+
src = (src / 127.5) - 1.0
|
| 440 |
+
src = src.to("cuda")
|
| 441 |
+
|
| 442 |
+
# set up operator and measurement
|
| 443 |
+
# operator = SuperResolutionOperator(in_shape=src.shape, scale_factor=4).to("cuda")
|
| 444 |
+
operator = GaussialBlurOperator(kernel_size=61, intensity=3.0).to("cuda")
|
| 445 |
+
measurement = operator(src)
|
| 446 |
+
|
| 447 |
+
# set up scheduler
|
| 448 |
+
scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
|
| 449 |
+
scheduler.set_timesteps(1000)
|
| 450 |
+
|
| 451 |
+
# set up model
|
| 452 |
+
model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256").to("cuda")
|
| 453 |
+
|
| 454 |
+
save_image((src + 1.0) / 2.0, "dps_src.png")
|
| 455 |
+
save_image((measurement + 1.0) / 2.0, "dps_mea.png")
|
| 456 |
+
|
| 457 |
+
# finally, the pipeline
|
| 458 |
+
dpspipe = DPSPipeline(model, scheduler)
|
| 459 |
+
image = dpspipe(
|
| 460 |
+
measurement=measurement,
|
| 461 |
+
operator=operator,
|
| 462 |
+
loss_fn=RMSELoss,
|
| 463 |
+
zeta=1.0,
|
| 464 |
+
).images[0]
|
| 465 |
+
|
| 466 |
+
image.save("dps_generated_image.png")
|
v0.27.0/edict_pipeline.py
ADDED
|
@@ -0,0 +1,264 @@
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from PIL import Image
|
| 5 |
+
from tqdm.auto import tqdm
|
| 6 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 7 |
+
|
| 8 |
+
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel
|
| 9 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 10 |
+
from diffusers.utils import (
|
| 11 |
+
deprecate,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class EDICTPipeline(DiffusionPipeline):
|
| 16 |
+
def __init__(
|
| 17 |
+
self,
|
| 18 |
+
vae: AutoencoderKL,
|
| 19 |
+
text_encoder: CLIPTextModel,
|
| 20 |
+
tokenizer: CLIPTokenizer,
|
| 21 |
+
unet: UNet2DConditionModel,
|
| 22 |
+
scheduler: DDIMScheduler,
|
| 23 |
+
mixing_coeff: float = 0.93,
|
| 24 |
+
leapfrog_steps: bool = True,
|
| 25 |
+
):
|
| 26 |
+
self.mixing_coeff = mixing_coeff
|
| 27 |
+
self.leapfrog_steps = leapfrog_steps
|
| 28 |
+
|
| 29 |
+
super().__init__()
|
| 30 |
+
self.register_modules(
|
| 31 |
+
vae=vae,
|
| 32 |
+
text_encoder=text_encoder,
|
| 33 |
+
tokenizer=tokenizer,
|
| 34 |
+
unet=unet,
|
| 35 |
+
scheduler=scheduler,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 39 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 40 |
+
|
| 41 |
+
def _encode_prompt(
|
| 42 |
+
self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False
|
| 43 |
+
):
|
| 44 |
+
text_inputs = self.tokenizer(
|
| 45 |
+
prompt,
|
| 46 |
+
padding="max_length",
|
| 47 |
+
max_length=self.tokenizer.model_max_length,
|
| 48 |
+
truncation=True,
|
| 49 |
+
return_tensors="pt",
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
prompt_embeds = self.text_encoder(text_inputs.input_ids.to(self.device)).last_hidden_state
|
| 53 |
+
|
| 54 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=self.device)
|
| 55 |
+
|
| 56 |
+
if do_classifier_free_guidance:
|
| 57 |
+
uncond_tokens = "" if negative_prompt is None else negative_prompt
|
| 58 |
+
|
| 59 |
+
uncond_input = self.tokenizer(
|
| 60 |
+
uncond_tokens,
|
| 61 |
+
padding="max_length",
|
| 62 |
+
max_length=self.tokenizer.model_max_length,
|
| 63 |
+
truncation=True,
|
| 64 |
+
return_tensors="pt",
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device)).last_hidden_state
|
| 68 |
+
|
| 69 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 70 |
+
|
| 71 |
+
return prompt_embeds
|
| 72 |
+
|
| 73 |
+
def denoise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
|
| 74 |
+
x = self.mixing_coeff * x + (1 - self.mixing_coeff) * y
|
| 75 |
+
y = self.mixing_coeff * y + (1 - self.mixing_coeff) * x
|
| 76 |
+
|
| 77 |
+
return [x, y]
|
| 78 |
+
|
| 79 |
+
def noise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
|
| 80 |
+
y = (y - (1 - self.mixing_coeff) * x) / self.mixing_coeff
|
| 81 |
+
x = (x - (1 - self.mixing_coeff) * y) / self.mixing_coeff
|
| 82 |
+
|
| 83 |
+
return [x, y]
|
| 84 |
+
|
| 85 |
+
def _get_alpha_and_beta(self, t: torch.Tensor):
|
| 86 |
+
# as self.alphas_cumprod is always in cpu
|
| 87 |
+
t = int(t)
|
| 88 |
+
|
| 89 |
+
alpha_prod = self.scheduler.alphas_cumprod[t] if t >= 0 else self.scheduler.final_alpha_cumprod
|
| 90 |
+
|
| 91 |
+
return alpha_prod, 1 - alpha_prod
|
| 92 |
+
|
| 93 |
+
def noise_step(
|
| 94 |
+
self,
|
| 95 |
+
base: torch.Tensor,
|
| 96 |
+
model_input: torch.Tensor,
|
| 97 |
+
model_output: torch.Tensor,
|
| 98 |
+
timestep: torch.Tensor,
|
| 99 |
+
):
|
| 100 |
+
prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
|
| 101 |
+
|
| 102 |
+
alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
|
| 103 |
+
alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
|
| 104 |
+
|
| 105 |
+
a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
|
| 106 |
+
b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
|
| 107 |
+
|
| 108 |
+
next_model_input = (base - b_t * model_output) / a_t
|
| 109 |
+
|
| 110 |
+
return model_input, next_model_input.to(base.dtype)
|
| 111 |
+
|
| 112 |
+
def denoise_step(
|
| 113 |
+
self,
|
| 114 |
+
base: torch.Tensor,
|
| 115 |
+
model_input: torch.Tensor,
|
| 116 |
+
model_output: torch.Tensor,
|
| 117 |
+
timestep: torch.Tensor,
|
| 118 |
+
):
|
| 119 |
+
prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
|
| 120 |
+
|
| 121 |
+
alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
|
| 122 |
+
alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
|
| 123 |
+
|
| 124 |
+
a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
|
| 125 |
+
b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
|
| 126 |
+
next_model_input = a_t * base + b_t * model_output
|
| 127 |
+
|
| 128 |
+
return model_input, next_model_input.to(base.dtype)
|
| 129 |
+
|
| 130 |
+
@torch.no_grad()
|
| 131 |
+
def decode_latents(self, latents: torch.Tensor):
|
| 132 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 133 |
+
image = self.vae.decode(latents).sample
|
| 134 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 135 |
+
return image
|
| 136 |
+
|
| 137 |
+
@torch.no_grad()
|
| 138 |
+
def prepare_latents(
|
| 139 |
+
self,
|
| 140 |
+
image: Image.Image,
|
| 141 |
+
text_embeds: torch.Tensor,
|
| 142 |
+
timesteps: torch.Tensor,
|
| 143 |
+
guidance_scale: float,
|
| 144 |
+
generator: Optional[torch.Generator] = None,
|
| 145 |
+
):
|
| 146 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 147 |
+
|
| 148 |
+
image = image.to(device=self.device, dtype=text_embeds.dtype)
|
| 149 |
+
latent = self.vae.encode(image).latent_dist.sample(generator)
|
| 150 |
+
|
| 151 |
+
latent = self.vae.config.scaling_factor * latent
|
| 152 |
+
|
| 153 |
+
coupled_latents = [latent.clone(), latent.clone()]
|
| 154 |
+
|
| 155 |
+
for i, t in tqdm(enumerate(timesteps), total=len(timesteps)):
|
| 156 |
+
coupled_latents = self.noise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
|
| 157 |
+
|
| 158 |
+
# j - model_input index, k - base index
|
| 159 |
+
for j in range(2):
|
| 160 |
+
k = j ^ 1
|
| 161 |
+
|
| 162 |
+
if self.leapfrog_steps:
|
| 163 |
+
if i % 2 == 0:
|
| 164 |
+
k, j = j, k
|
| 165 |
+
|
| 166 |
+
model_input = coupled_latents[j]
|
| 167 |
+
base = coupled_latents[k]
|
| 168 |
+
|
| 169 |
+
latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
|
| 170 |
+
|
| 171 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds).sample
|
| 172 |
+
|
| 173 |
+
if do_classifier_free_guidance:
|
| 174 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 175 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 176 |
+
|
| 177 |
+
base, model_input = self.noise_step(
|
| 178 |
+
base=base,
|
| 179 |
+
model_input=model_input,
|
| 180 |
+
model_output=noise_pred,
|
| 181 |
+
timestep=t,
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
coupled_latents[k] = model_input
|
| 185 |
+
|
| 186 |
+
return coupled_latents
|
| 187 |
+
|
| 188 |
+
@torch.no_grad()
|
| 189 |
+
def __call__(
|
| 190 |
+
self,
|
| 191 |
+
base_prompt: str,
|
| 192 |
+
target_prompt: str,
|
| 193 |
+
image: Image.Image,
|
| 194 |
+
guidance_scale: float = 3.0,
|
| 195 |
+
num_inference_steps: int = 50,
|
| 196 |
+
strength: float = 0.8,
|
| 197 |
+
negative_prompt: Optional[str] = None,
|
| 198 |
+
generator: Optional[torch.Generator] = None,
|
| 199 |
+
output_type: Optional[str] = "pil",
|
| 200 |
+
):
|
| 201 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 202 |
+
|
| 203 |
+
image = self.image_processor.preprocess(image)
|
| 204 |
+
|
| 205 |
+
base_embeds = self._encode_prompt(base_prompt, negative_prompt, do_classifier_free_guidance)
|
| 206 |
+
target_embeds = self._encode_prompt(target_prompt, negative_prompt, do_classifier_free_guidance)
|
| 207 |
+
|
| 208 |
+
self.scheduler.set_timesteps(num_inference_steps, self.device)
|
| 209 |
+
|
| 210 |
+
t_limit = num_inference_steps - int(num_inference_steps * strength)
|
| 211 |
+
fwd_timesteps = self.scheduler.timesteps[t_limit:]
|
| 212 |
+
bwd_timesteps = fwd_timesteps.flip(0)
|
| 213 |
+
|
| 214 |
+
coupled_latents = self.prepare_latents(image, base_embeds, bwd_timesteps, guidance_scale, generator)
|
| 215 |
+
|
| 216 |
+
for i, t in tqdm(enumerate(fwd_timesteps), total=len(fwd_timesteps)):
|
| 217 |
+
# j - model_input index, k - base index
|
| 218 |
+
for k in range(2):
|
| 219 |
+
j = k ^ 1
|
| 220 |
+
|
| 221 |
+
if self.leapfrog_steps:
|
| 222 |
+
if i % 2 == 1:
|
| 223 |
+
k, j = j, k
|
| 224 |
+
|
| 225 |
+
model_input = coupled_latents[j]
|
| 226 |
+
base = coupled_latents[k]
|
| 227 |
+
|
| 228 |
+
latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
|
| 229 |
+
|
| 230 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=target_embeds).sample
|
| 231 |
+
|
| 232 |
+
if do_classifier_free_guidance:
|
| 233 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 234 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 235 |
+
|
| 236 |
+
base, model_input = self.denoise_step(
|
| 237 |
+
base=base,
|
| 238 |
+
model_input=model_input,
|
| 239 |
+
model_output=noise_pred,
|
| 240 |
+
timestep=t,
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
coupled_latents[k] = model_input
|
| 244 |
+
|
| 245 |
+
coupled_latents = self.denoise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
|
| 246 |
+
|
| 247 |
+
# either one is fine
|
| 248 |
+
final_latent = coupled_latents[0]
|
| 249 |
+
|
| 250 |
+
if output_type not in ["latent", "pt", "np", "pil"]:
|
| 251 |
+
deprecation_message = (
|
| 252 |
+
f"the output_type {output_type} is outdated. Please make sure to set it to one of these instead: "
|
| 253 |
+
"`pil`, `np`, `pt`, `latent`"
|
| 254 |
+
)
|
| 255 |
+
deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
|
| 256 |
+
output_type = "np"
|
| 257 |
+
|
| 258 |
+
if output_type == "latent":
|
| 259 |
+
image = final_latent
|
| 260 |
+
else:
|
| 261 |
+
image = self.decode_latents(final_latent)
|
| 262 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 263 |
+
|
| 264 |
+
return image
|
v0.27.0/gluegen.py
ADDED
|
@@ -0,0 +1,811 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
from typing import Any, Dict, List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor
|
| 7 |
+
|
| 8 |
+
from diffusers import DiffusionPipeline
|
| 9 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 10 |
+
from diffusers.loaders import LoraLoaderMixin
|
| 11 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 12 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 13 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 14 |
+
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
|
| 15 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 16 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 17 |
+
from diffusers.utils import (
|
| 18 |
+
USE_PEFT_BACKEND,
|
| 19 |
+
logging,
|
| 20 |
+
scale_lora_layers,
|
| 21 |
+
unscale_lora_layers,
|
| 22 |
+
)
|
| 23 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class TranslatorBase(nn.Module):
|
| 30 |
+
def __init__(self, num_tok, dim, dim_out, mult=2):
|
| 31 |
+
super().__init__()
|
| 32 |
+
|
| 33 |
+
self.dim_in = dim
|
| 34 |
+
self.dim_out = dim_out
|
| 35 |
+
|
| 36 |
+
self.net_tok = nn.Sequential(
|
| 37 |
+
nn.Linear(num_tok, int(num_tok * mult)),
|
| 38 |
+
nn.LayerNorm(int(num_tok * mult)),
|
| 39 |
+
nn.GELU(),
|
| 40 |
+
nn.Linear(int(num_tok * mult), int(num_tok * mult)),
|
| 41 |
+
nn.LayerNorm(int(num_tok * mult)),
|
| 42 |
+
nn.GELU(),
|
| 43 |
+
nn.Linear(int(num_tok * mult), num_tok),
|
| 44 |
+
nn.LayerNorm(num_tok),
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
self.net_sen = nn.Sequential(
|
| 48 |
+
nn.Linear(dim, int(dim * mult)),
|
| 49 |
+
nn.LayerNorm(int(dim * mult)),
|
| 50 |
+
nn.GELU(),
|
| 51 |
+
nn.Linear(int(dim * mult), int(dim * mult)),
|
| 52 |
+
nn.LayerNorm(int(dim * mult)),
|
| 53 |
+
nn.GELU(),
|
| 54 |
+
nn.Linear(int(dim * mult), dim_out),
|
| 55 |
+
nn.LayerNorm(dim_out),
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
def forward(self, x):
|
| 59 |
+
if self.dim_in == self.dim_out:
|
| 60 |
+
indentity_0 = x
|
| 61 |
+
x = self.net_sen(x)
|
| 62 |
+
x += indentity_0
|
| 63 |
+
x = x.transpose(1, 2)
|
| 64 |
+
|
| 65 |
+
indentity_1 = x
|
| 66 |
+
x = self.net_tok(x)
|
| 67 |
+
x += indentity_1
|
| 68 |
+
x = x.transpose(1, 2)
|
| 69 |
+
else:
|
| 70 |
+
x = self.net_sen(x)
|
| 71 |
+
x = x.transpose(1, 2)
|
| 72 |
+
|
| 73 |
+
x = self.net_tok(x)
|
| 74 |
+
x = x.transpose(1, 2)
|
| 75 |
+
return x
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class TranslatorBaseNoLN(nn.Module):
|
| 79 |
+
def __init__(self, num_tok, dim, dim_out, mult=2):
|
| 80 |
+
super().__init__()
|
| 81 |
+
|
| 82 |
+
self.dim_in = dim
|
| 83 |
+
self.dim_out = dim_out
|
| 84 |
+
|
| 85 |
+
self.net_tok = nn.Sequential(
|
| 86 |
+
nn.Linear(num_tok, int(num_tok * mult)),
|
| 87 |
+
nn.GELU(),
|
| 88 |
+
nn.Linear(int(num_tok * mult), int(num_tok * mult)),
|
| 89 |
+
nn.GELU(),
|
| 90 |
+
nn.Linear(int(num_tok * mult), num_tok),
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
self.net_sen = nn.Sequential(
|
| 94 |
+
nn.Linear(dim, int(dim * mult)),
|
| 95 |
+
nn.GELU(),
|
| 96 |
+
nn.Linear(int(dim * mult), int(dim * mult)),
|
| 97 |
+
nn.GELU(),
|
| 98 |
+
nn.Linear(int(dim * mult), dim_out),
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
def forward(self, x):
|
| 102 |
+
if self.dim_in == self.dim_out:
|
| 103 |
+
indentity_0 = x
|
| 104 |
+
x = self.net_sen(x)
|
| 105 |
+
x += indentity_0
|
| 106 |
+
x = x.transpose(1, 2)
|
| 107 |
+
|
| 108 |
+
indentity_1 = x
|
| 109 |
+
x = self.net_tok(x)
|
| 110 |
+
x += indentity_1
|
| 111 |
+
x = x.transpose(1, 2)
|
| 112 |
+
else:
|
| 113 |
+
x = self.net_sen(x)
|
| 114 |
+
x = x.transpose(1, 2)
|
| 115 |
+
|
| 116 |
+
x = self.net_tok(x)
|
| 117 |
+
x = x.transpose(1, 2)
|
| 118 |
+
return x
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class TranslatorNoLN(nn.Module):
|
| 122 |
+
def __init__(self, num_tok, dim, dim_out, mult=2, depth=5):
|
| 123 |
+
super().__init__()
|
| 124 |
+
|
| 125 |
+
self.blocks = nn.ModuleList([TranslatorBase(num_tok, dim, dim, mult=2) for d in range(depth)])
|
| 126 |
+
self.gelu = nn.GELU()
|
| 127 |
+
|
| 128 |
+
self.tail = TranslatorBaseNoLN(num_tok, dim, dim_out, mult=2)
|
| 129 |
+
|
| 130 |
+
def forward(self, x):
|
| 131 |
+
for block in self.blocks:
|
| 132 |
+
x = block(x) + x
|
| 133 |
+
x = self.gelu(x)
|
| 134 |
+
|
| 135 |
+
x = self.tail(x)
|
| 136 |
+
return x
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 140 |
+
"""
|
| 141 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 142 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 143 |
+
"""
|
| 144 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 145 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 146 |
+
# rescale the results from guidance (fixes overexposure)
|
| 147 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 148 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 149 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 150 |
+
return noise_cfg
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def retrieve_timesteps(
|
| 154 |
+
scheduler,
|
| 155 |
+
num_inference_steps: Optional[int] = None,
|
| 156 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 157 |
+
timesteps: Optional[List[int]] = None,
|
| 158 |
+
**kwargs,
|
| 159 |
+
):
|
| 160 |
+
"""
|
| 161 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 162 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 163 |
+
|
| 164 |
+
Args:
|
| 165 |
+
scheduler (`SchedulerMixin`):
|
| 166 |
+
The scheduler to get timesteps from.
|
| 167 |
+
num_inference_steps (`int`):
|
| 168 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 169 |
+
`timesteps` must be `None`.
|
| 170 |
+
device (`str` or `torch.device`, *optional*):
|
| 171 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 172 |
+
timesteps (`List[int]`, *optional*):
|
| 173 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 174 |
+
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
|
| 175 |
+
must be `None`.
|
| 176 |
+
|
| 177 |
+
Returns:
|
| 178 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 179 |
+
second element is the number of inference steps.
|
| 180 |
+
"""
|
| 181 |
+
if timesteps is not None:
|
| 182 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 183 |
+
if not accepts_timesteps:
|
| 184 |
+
raise ValueError(
|
| 185 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 186 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 187 |
+
)
|
| 188 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 189 |
+
timesteps = scheduler.timesteps
|
| 190 |
+
num_inference_steps = len(timesteps)
|
| 191 |
+
else:
|
| 192 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 193 |
+
timesteps = scheduler.timesteps
|
| 194 |
+
return timesteps, num_inference_steps
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
class GlueGenStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, LoraLoaderMixin):
|
| 198 |
+
def __init__(
|
| 199 |
+
self,
|
| 200 |
+
vae: AutoencoderKL,
|
| 201 |
+
text_encoder: AutoModel,
|
| 202 |
+
tokenizer: AutoTokenizer,
|
| 203 |
+
unet: UNet2DConditionModel,
|
| 204 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 205 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 206 |
+
feature_extractor: CLIPImageProcessor,
|
| 207 |
+
language_adapter: TranslatorNoLN = None,
|
| 208 |
+
tensor_norm: torch.FloatTensor = None,
|
| 209 |
+
requires_safety_checker: bool = True,
|
| 210 |
+
):
|
| 211 |
+
super().__init__()
|
| 212 |
+
|
| 213 |
+
self.register_modules(
|
| 214 |
+
vae=vae,
|
| 215 |
+
text_encoder=text_encoder,
|
| 216 |
+
tokenizer=tokenizer,
|
| 217 |
+
unet=unet,
|
| 218 |
+
scheduler=scheduler,
|
| 219 |
+
safety_checker=safety_checker,
|
| 220 |
+
feature_extractor=feature_extractor,
|
| 221 |
+
language_adapter=language_adapter,
|
| 222 |
+
tensor_norm=tensor_norm,
|
| 223 |
+
)
|
| 224 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 225 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 226 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 227 |
+
|
| 228 |
+
def load_language_adapter(
|
| 229 |
+
self,
|
| 230 |
+
model_path: str,
|
| 231 |
+
num_token: int,
|
| 232 |
+
dim: int,
|
| 233 |
+
dim_out: int,
|
| 234 |
+
tensor_norm: torch.FloatTensor,
|
| 235 |
+
mult: int = 2,
|
| 236 |
+
depth: int = 5,
|
| 237 |
+
):
|
| 238 |
+
device = self._execution_device
|
| 239 |
+
self.tensor_norm = tensor_norm.to(device)
|
| 240 |
+
self.language_adapter = TranslatorNoLN(num_tok=num_token, dim=dim, dim_out=dim_out, mult=mult, depth=depth).to(
|
| 241 |
+
device
|
| 242 |
+
)
|
| 243 |
+
self.language_adapter.load_state_dict(torch.load(model_path))
|
| 244 |
+
|
| 245 |
+
def _adapt_language(self, prompt_embeds: torch.FloatTensor):
|
| 246 |
+
prompt_embeds = prompt_embeds / 3
|
| 247 |
+
prompt_embeds = self.language_adapter(prompt_embeds) * (self.tensor_norm / 2)
|
| 248 |
+
return prompt_embeds
|
| 249 |
+
|
| 250 |
+
def encode_prompt(
|
| 251 |
+
self,
|
| 252 |
+
prompt,
|
| 253 |
+
device,
|
| 254 |
+
num_images_per_prompt,
|
| 255 |
+
do_classifier_free_guidance,
|
| 256 |
+
negative_prompt=None,
|
| 257 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 258 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 259 |
+
lora_scale: Optional[float] = None,
|
| 260 |
+
clip_skip: Optional[int] = None,
|
| 261 |
+
):
|
| 262 |
+
r"""
|
| 263 |
+
Encodes the prompt into text encoder hidden states.
|
| 264 |
+
|
| 265 |
+
Args:
|
| 266 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 267 |
+
prompt to be encoded
|
| 268 |
+
device: (`torch.device`):
|
| 269 |
+
torch device
|
| 270 |
+
num_images_per_prompt (`int`):
|
| 271 |
+
number of images that should be generated per prompt
|
| 272 |
+
do_classifier_free_guidance (`bool`):
|
| 273 |
+
whether to use classifier free guidance or not
|
| 274 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 275 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 276 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 277 |
+
less than `1`).
|
| 278 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 279 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 280 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 281 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 282 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 283 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 284 |
+
argument.
|
| 285 |
+
lora_scale (`float`, *optional*):
|
| 286 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 287 |
+
clip_skip (`int`, *optional*):
|
| 288 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 289 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 290 |
+
"""
|
| 291 |
+
# set lora scale so that monkey patched LoRA
|
| 292 |
+
# function of text encoder can correctly access it
|
| 293 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 294 |
+
self._lora_scale = lora_scale
|
| 295 |
+
|
| 296 |
+
# dynamically adjust the LoRA scale
|
| 297 |
+
if not USE_PEFT_BACKEND:
|
| 298 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 299 |
+
else:
|
| 300 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 301 |
+
|
| 302 |
+
if prompt is not None and isinstance(prompt, str):
|
| 303 |
+
batch_size = 1
|
| 304 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 305 |
+
batch_size = len(prompt)
|
| 306 |
+
else:
|
| 307 |
+
batch_size = prompt_embeds.shape[0]
|
| 308 |
+
|
| 309 |
+
if prompt_embeds is None:
|
| 310 |
+
text_inputs = self.tokenizer(
|
| 311 |
+
prompt,
|
| 312 |
+
padding="max_length",
|
| 313 |
+
max_length=self.tokenizer.model_max_length,
|
| 314 |
+
truncation=True,
|
| 315 |
+
return_tensors="pt",
|
| 316 |
+
)
|
| 317 |
+
text_input_ids = text_inputs.input_ids
|
| 318 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 319 |
+
|
| 320 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 321 |
+
text_input_ids, untruncated_ids
|
| 322 |
+
):
|
| 323 |
+
removed_text = self.tokenizer.batch_decode(
|
| 324 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 325 |
+
)
|
| 326 |
+
logger.warning(
|
| 327 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 328 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 329 |
+
)
|
| 330 |
+
|
| 331 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 332 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 333 |
+
elif self.language_adapter is not None:
|
| 334 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 335 |
+
else:
|
| 336 |
+
attention_mask = None
|
| 337 |
+
|
| 338 |
+
if clip_skip is None:
|
| 339 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 340 |
+
prompt_embeds = prompt_embeds[0]
|
| 341 |
+
|
| 342 |
+
else:
|
| 343 |
+
prompt_embeds = self.text_encoder(
|
| 344 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 345 |
+
)
|
| 346 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 347 |
+
# all the hidden states from the encoder layers. Then index into
|
| 348 |
+
# the tuple to access the hidden states from the desired layer.
|
| 349 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 350 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 351 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 352 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 353 |
+
# layer.
|
| 354 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 355 |
+
|
| 356 |
+
# Run prompt language adapter
|
| 357 |
+
if self.language_adapter is not None:
|
| 358 |
+
prompt_embeds = self._adapt_language(prompt_embeds)
|
| 359 |
+
|
| 360 |
+
if self.text_encoder is not None:
|
| 361 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 362 |
+
elif self.unet is not None:
|
| 363 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 364 |
+
else:
|
| 365 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 366 |
+
|
| 367 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 368 |
+
|
| 369 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 370 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 371 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 372 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 373 |
+
|
| 374 |
+
# get unconditional embeddings for classifier free guidance
|
| 375 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 376 |
+
uncond_tokens: List[str]
|
| 377 |
+
if negative_prompt is None:
|
| 378 |
+
uncond_tokens = [""] * batch_size
|
| 379 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 380 |
+
raise TypeError(
|
| 381 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 382 |
+
f" {type(prompt)}."
|
| 383 |
+
)
|
| 384 |
+
elif isinstance(negative_prompt, str):
|
| 385 |
+
uncond_tokens = [negative_prompt]
|
| 386 |
+
elif batch_size != len(negative_prompt):
|
| 387 |
+
raise ValueError(
|
| 388 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 389 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 390 |
+
" the batch size of `prompt`."
|
| 391 |
+
)
|
| 392 |
+
else:
|
| 393 |
+
uncond_tokens = negative_prompt
|
| 394 |
+
|
| 395 |
+
max_length = prompt_embeds.shape[1]
|
| 396 |
+
uncond_input = self.tokenizer(
|
| 397 |
+
uncond_tokens,
|
| 398 |
+
padding="max_length",
|
| 399 |
+
max_length=max_length,
|
| 400 |
+
truncation=True,
|
| 401 |
+
return_tensors="pt",
|
| 402 |
+
)
|
| 403 |
+
|
| 404 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 405 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 406 |
+
else:
|
| 407 |
+
attention_mask = None
|
| 408 |
+
|
| 409 |
+
negative_prompt_embeds = self.text_encoder(
|
| 410 |
+
uncond_input.input_ids.to(device),
|
| 411 |
+
attention_mask=attention_mask,
|
| 412 |
+
)
|
| 413 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 414 |
+
# Run negative prompt language adapter
|
| 415 |
+
if self.language_adapter is not None:
|
| 416 |
+
negative_prompt_embeds = self._adapt_language(negative_prompt_embeds)
|
| 417 |
+
|
| 418 |
+
if do_classifier_free_guidance:
|
| 419 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 420 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 421 |
+
|
| 422 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 423 |
+
|
| 424 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 425 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 426 |
+
|
| 427 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 428 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 429 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 430 |
+
|
| 431 |
+
return prompt_embeds, negative_prompt_embeds
|
| 432 |
+
|
| 433 |
+
def run_safety_checker(self, image, device, dtype):
|
| 434 |
+
if self.safety_checker is None:
|
| 435 |
+
has_nsfw_concept = None
|
| 436 |
+
else:
|
| 437 |
+
if torch.is_tensor(image):
|
| 438 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 439 |
+
else:
|
| 440 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 441 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 442 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 443 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 444 |
+
)
|
| 445 |
+
return image, has_nsfw_concept
|
| 446 |
+
|
| 447 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 448 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 449 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 450 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 451 |
+
# and should be between [0, 1]
|
| 452 |
+
|
| 453 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 454 |
+
extra_step_kwargs = {}
|
| 455 |
+
if accepts_eta:
|
| 456 |
+
extra_step_kwargs["eta"] = eta
|
| 457 |
+
|
| 458 |
+
# check if the scheduler accepts generator
|
| 459 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 460 |
+
if accepts_generator:
|
| 461 |
+
extra_step_kwargs["generator"] = generator
|
| 462 |
+
return extra_step_kwargs
|
| 463 |
+
|
| 464 |
+
def check_inputs(
|
| 465 |
+
self,
|
| 466 |
+
prompt,
|
| 467 |
+
height,
|
| 468 |
+
width,
|
| 469 |
+
negative_prompt=None,
|
| 470 |
+
prompt_embeds=None,
|
| 471 |
+
negative_prompt_embeds=None,
|
| 472 |
+
):
|
| 473 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 474 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 475 |
+
|
| 476 |
+
if prompt is not None and prompt_embeds is not None:
|
| 477 |
+
raise ValueError(
|
| 478 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 479 |
+
" only forward one of the two."
|
| 480 |
+
)
|
| 481 |
+
elif prompt is None and prompt_embeds is None:
|
| 482 |
+
raise ValueError(
|
| 483 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 484 |
+
)
|
| 485 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 486 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 487 |
+
|
| 488 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 489 |
+
raise ValueError(
|
| 490 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 491 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 492 |
+
)
|
| 493 |
+
|
| 494 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 495 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 496 |
+
raise ValueError(
|
| 497 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 498 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 499 |
+
f" {negative_prompt_embeds.shape}."
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 503 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 504 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 505 |
+
raise ValueError(
|
| 506 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 507 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 508 |
+
)
|
| 509 |
+
|
| 510 |
+
if latents is None:
|
| 511 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 512 |
+
else:
|
| 513 |
+
latents = latents.to(device)
|
| 514 |
+
|
| 515 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 516 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 517 |
+
return latents
|
| 518 |
+
|
| 519 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 520 |
+
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 521 |
+
"""
|
| 522 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 523 |
+
|
| 524 |
+
Args:
|
| 525 |
+
timesteps (`torch.Tensor`):
|
| 526 |
+
generate embedding vectors at these timesteps
|
| 527 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 528 |
+
dimension of the embeddings to generate
|
| 529 |
+
dtype:
|
| 530 |
+
data type of the generated embeddings
|
| 531 |
+
|
| 532 |
+
Returns:
|
| 533 |
+
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
|
| 534 |
+
"""
|
| 535 |
+
assert len(w.shape) == 1
|
| 536 |
+
w = w * 1000.0
|
| 537 |
+
|
| 538 |
+
half_dim = embedding_dim // 2
|
| 539 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 540 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 541 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 542 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 543 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 544 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 545 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 546 |
+
return emb
|
| 547 |
+
|
| 548 |
+
@property
|
| 549 |
+
def guidance_scale(self):
|
| 550 |
+
return self._guidance_scale
|
| 551 |
+
|
| 552 |
+
@property
|
| 553 |
+
def guidance_rescale(self):
|
| 554 |
+
return self._guidance_rescale
|
| 555 |
+
|
| 556 |
+
@property
|
| 557 |
+
def clip_skip(self):
|
| 558 |
+
return self._clip_skip
|
| 559 |
+
|
| 560 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 561 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 562 |
+
# corresponds to doing no classifier free guidance.
|
| 563 |
+
@property
|
| 564 |
+
def do_classifier_free_guidance(self):
|
| 565 |
+
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
|
| 566 |
+
|
| 567 |
+
@property
|
| 568 |
+
def cross_attention_kwargs(self):
|
| 569 |
+
return self._cross_attention_kwargs
|
| 570 |
+
|
| 571 |
+
@property
|
| 572 |
+
def num_timesteps(self):
|
| 573 |
+
return self._num_timesteps
|
| 574 |
+
|
| 575 |
+
@property
|
| 576 |
+
def interrupt(self):
|
| 577 |
+
return self._interrupt
|
| 578 |
+
|
| 579 |
+
@torch.no_grad()
|
| 580 |
+
def __call__(
|
| 581 |
+
self,
|
| 582 |
+
prompt: Union[str, List[str]] = None,
|
| 583 |
+
height: Optional[int] = None,
|
| 584 |
+
width: Optional[int] = None,
|
| 585 |
+
num_inference_steps: int = 50,
|
| 586 |
+
timesteps: List[int] = None,
|
| 587 |
+
guidance_scale: float = 7.5,
|
| 588 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 589 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 590 |
+
eta: float = 0.0,
|
| 591 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 592 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 593 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 594 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 595 |
+
output_type: Optional[str] = "pil",
|
| 596 |
+
return_dict: bool = True,
|
| 597 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 598 |
+
guidance_rescale: float = 0.0,
|
| 599 |
+
clip_skip: Optional[int] = None,
|
| 600 |
+
**kwargs,
|
| 601 |
+
):
|
| 602 |
+
r"""
|
| 603 |
+
The call function to the pipeline for generation.
|
| 604 |
+
|
| 605 |
+
Args:
|
| 606 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 607 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 608 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 609 |
+
The height in pixels of the generated image.
|
| 610 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 611 |
+
The width in pixels of the generated image.
|
| 612 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 613 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 614 |
+
expense of slower inference.
|
| 615 |
+
timesteps (`List[int]`, *optional*):
|
| 616 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 617 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 618 |
+
passed will be used. Must be in descending order.
|
| 619 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 620 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 621 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 622 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 623 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 624 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 625 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 626 |
+
The number of images to generate per prompt.
|
| 627 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 628 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 629 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 630 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 631 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 632 |
+
generation deterministic.
|
| 633 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 634 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 635 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 636 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 637 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 638 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 639 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 640 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 641 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 642 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 643 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
|
| 644 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 645 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 646 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 647 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 648 |
+
plain tuple.
|
| 649 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 650 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 651 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 652 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 653 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 654 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 655 |
+
using zero terminal SNR.
|
| 656 |
+
clip_skip (`int`, *optional*):
|
| 657 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 658 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 659 |
+
|
| 660 |
+
Examples:
|
| 661 |
+
|
| 662 |
+
Returns:
|
| 663 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 664 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 665 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 666 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 667 |
+
"not-safe-for-work" (nsfw) content.
|
| 668 |
+
"""
|
| 669 |
+
|
| 670 |
+
# 0. Default height and width to unet
|
| 671 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 672 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 673 |
+
# to deal with lora scaling and other possible forward hooks
|
| 674 |
+
|
| 675 |
+
# 1. Check inputs. Raise error if not correct
|
| 676 |
+
self.check_inputs(
|
| 677 |
+
prompt,
|
| 678 |
+
height,
|
| 679 |
+
width,
|
| 680 |
+
negative_prompt,
|
| 681 |
+
prompt_embeds,
|
| 682 |
+
negative_prompt_embeds,
|
| 683 |
+
)
|
| 684 |
+
|
| 685 |
+
self._guidance_scale = guidance_scale
|
| 686 |
+
self._guidance_rescale = guidance_rescale
|
| 687 |
+
self._clip_skip = clip_skip
|
| 688 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 689 |
+
self._interrupt = False
|
| 690 |
+
|
| 691 |
+
# 2. Define call parameters
|
| 692 |
+
if prompt is not None and isinstance(prompt, str):
|
| 693 |
+
batch_size = 1
|
| 694 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 695 |
+
batch_size = len(prompt)
|
| 696 |
+
else:
|
| 697 |
+
batch_size = prompt_embeds.shape[0]
|
| 698 |
+
|
| 699 |
+
device = self._execution_device
|
| 700 |
+
|
| 701 |
+
# 3. Encode input prompt
|
| 702 |
+
lora_scale = (
|
| 703 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 704 |
+
)
|
| 705 |
+
|
| 706 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 707 |
+
prompt,
|
| 708 |
+
device,
|
| 709 |
+
num_images_per_prompt,
|
| 710 |
+
self.do_classifier_free_guidance,
|
| 711 |
+
negative_prompt,
|
| 712 |
+
prompt_embeds=prompt_embeds,
|
| 713 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 714 |
+
lora_scale=lora_scale,
|
| 715 |
+
clip_skip=self.clip_skip,
|
| 716 |
+
)
|
| 717 |
+
|
| 718 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 719 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 720 |
+
# to avoid doing two forward passes
|
| 721 |
+
if self.do_classifier_free_guidance:
|
| 722 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 723 |
+
|
| 724 |
+
# 4. Prepare timesteps
|
| 725 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 726 |
+
|
| 727 |
+
# 5. Prepare latent variables
|
| 728 |
+
num_channels_latents = self.unet.config.in_channels
|
| 729 |
+
latents = self.prepare_latents(
|
| 730 |
+
batch_size * num_images_per_prompt,
|
| 731 |
+
num_channels_latents,
|
| 732 |
+
height,
|
| 733 |
+
width,
|
| 734 |
+
prompt_embeds.dtype,
|
| 735 |
+
device,
|
| 736 |
+
generator,
|
| 737 |
+
latents,
|
| 738 |
+
)
|
| 739 |
+
|
| 740 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 741 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 742 |
+
|
| 743 |
+
# 6.2 Optionally get Guidance Scale Embedding
|
| 744 |
+
timestep_cond = None
|
| 745 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 746 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 747 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 748 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 749 |
+
).to(device=device, dtype=latents.dtype)
|
| 750 |
+
|
| 751 |
+
# 7. Denoising loop
|
| 752 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 753 |
+
self._num_timesteps = len(timesteps)
|
| 754 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 755 |
+
for i, t in enumerate(timesteps):
|
| 756 |
+
if self.interrupt:
|
| 757 |
+
continue
|
| 758 |
+
|
| 759 |
+
# expand the latents if we are doing classifier free guidance
|
| 760 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 761 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 762 |
+
|
| 763 |
+
# predict the noise residual
|
| 764 |
+
noise_pred = self.unet(
|
| 765 |
+
latent_model_input,
|
| 766 |
+
t,
|
| 767 |
+
encoder_hidden_states=prompt_embeds,
|
| 768 |
+
timestep_cond=timestep_cond,
|
| 769 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 770 |
+
return_dict=False,
|
| 771 |
+
)[0]
|
| 772 |
+
|
| 773 |
+
# perform guidance
|
| 774 |
+
if self.do_classifier_free_guidance:
|
| 775 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 776 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 777 |
+
|
| 778 |
+
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
|
| 779 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 780 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
|
| 781 |
+
|
| 782 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 783 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 784 |
+
|
| 785 |
+
# call the callback, if provided
|
| 786 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 787 |
+
progress_bar.update()
|
| 788 |
+
|
| 789 |
+
if not output_type == "latent":
|
| 790 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
|
| 791 |
+
0
|
| 792 |
+
]
|
| 793 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 794 |
+
else:
|
| 795 |
+
image = latents
|
| 796 |
+
has_nsfw_concept = None
|
| 797 |
+
|
| 798 |
+
if has_nsfw_concept is None:
|
| 799 |
+
do_denormalize = [True] * image.shape[0]
|
| 800 |
+
else:
|
| 801 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 802 |
+
|
| 803 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 804 |
+
|
| 805 |
+
# Offload all models
|
| 806 |
+
self.maybe_free_model_hooks()
|
| 807 |
+
|
| 808 |
+
if not return_dict:
|
| 809 |
+
return (image, has_nsfw_concept)
|
| 810 |
+
|
| 811 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
v0.27.0/iadb.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from typing import List, Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import ConfigMixin
from diffusers.pipelines.pipeline_utils import ImagePipelineOutput
from diffusers.schedulers.scheduling_utils import SchedulerMixin


class IADBScheduler(SchedulerMixin, ConfigMixin):
    """
    IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist.

    For more details, see the original paper: https://arxiv.org/abs/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html
    """

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        x_alpha: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`): direct output from learned diffusion model. It is the direction from x0 to x1.
            timestep (`float`): current timestep in the diffusion chain.
            x_alpha (`torch.FloatTensor`): x_alpha sample for the current timestep

        Returns:
            `torch.FloatTensor`: the sample at the previous timestep

        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        alpha = timestep / self.num_inference_steps
        alpha_next = (timestep + 1) / self.num_inference_steps

        d = model_output

        x_alpha = x_alpha + (alpha_next - alpha) * d

        return x_alpha

    def set_timesteps(self, num_inference_steps: int):
        self.num_inference_steps = num_inference_steps

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        alpha: torch.FloatTensor,
    ) -> torch.FloatTensor:
        return original_samples * alpha + noise * (1 - alpha)

    def __len__(self):
        return self.config.num_train_timesteps


class IADBPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
            True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
        """

        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        x_alpha = image.clone()
        for t in self.progress_bar(range(num_inference_steps)):
            alpha = t / num_inference_steps

            # 1. predict noise model_output
            model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample

            # 2. step
            x_alpha = self.scheduler.step(model_output, t, x_alpha)

        image = (x_alpha * 0.5 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
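The `step` above is a plain Euler update of the α-blending ODE: `x_alpha` moves along the predicted direction by `1 / num_inference_steps` per step, walking from pure noise (alpha = 0) to a clean sample (alpha = 1). A minimal sketch of that behaviour on dummy tensors, assuming the file above is saved locally as `iadb.py`; the hand-written `direction` stands in for what a trained `UNet2DModel` would predict, so the snippet needs no weights.

import torch

from iadb import IADBScheduler  # assumes the file above was saved as iadb.py

scheduler = IADBScheduler()
scheduler.set_timesteps(4)

x_data = torch.ones(1, 3, 8, 8)    # stand-in for a clean sample (x1 in the paper)
x_noise = torch.randn(1, 3, 8, 8)  # stand-in for Gaussian noise (x0 in the paper)

# alpha = 0 gives pure noise, alpha = 1 gives the clean sample
x_alpha = scheduler.add_noise(x_data, x_noise, alpha=0.0)

for t in range(scheduler.num_inference_steps):
    # a trained unet would predict this (data - noise) direction from x_alpha and alpha
    direction = x_data - x_noise
    x_alpha = scheduler.step(direction, t, x_alpha)

print(torch.allclose(x_alpha, x_data, atol=1e-5))  # True: the four Euler increments sum to a full blend

In `IADBPipeline.__call__` the same loop is driven by `self.unet(x_alpha, alpha).sample`, and the final `x_alpha` is rescaled from [-1, 1] to [0, 1] before conversion to PIL.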
v0.27.0/imagic_stable_diffusion.py
ADDED
@@ -0,0 +1,469 @@
"""
modeled after the textual_inversion.py / train_dreambooth.py and the work
of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
"""
import inspect
import warnings
from typing import List, Optional, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from accelerate import Accelerator

# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from tqdm.auto import tqdm
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
# ------------------------------------------------------------------------------

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
    r"""
    Pipeline for imagic image editing.
    See paper here: https://arxiv.org/pdf/2210.09276.pdf

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def train(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        generator: Optional[torch.Generator] = None,
        embedding_learning_rate: float = 0.001,
        diffusion_model_learning_rate: float = 2e-6,
        text_embedding_optimization_steps: int = 500,
        model_fine_tuning_optimization_steps: int = 1000,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.
        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        accelerator = Accelerator(
            gradient_accumulation_steps=1,
            mixed_precision="fp16",
        )

        if "torch_device" in kwargs:
            device = kwargs.pop("torch_device")
            warnings.warn(
                "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
                " Consider using `pipe.to(torch_device)` instead."
            )

            if device is None:
                device = "cuda" if torch.cuda.is_available() else "cpu"
            self.to(device)

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # Freeze vae and unet
        self.vae.requires_grad_(False)
        self.unet.requires_grad_(False)
        self.text_encoder.requires_grad_(False)
        self.unet.eval()
        self.vae.eval()
        self.text_encoder.eval()

        if accelerator.is_main_process:
            accelerator.init_trackers(
                "imagic",
                config={
                    "embedding_learning_rate": embedding_learning_rate,
                    "text_embedding_optimization_steps": text_embedding_optimization_steps,
                },
            )

        # get text embeddings for prompt
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = torch.nn.Parameter(
            self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
        )
        text_embeddings = text_embeddings.detach()
        text_embeddings.requires_grad_()
        text_embeddings_orig = text_embeddings.clone()

        # Initialize the optimizer
        optimizer = torch.optim.Adam(
            [text_embeddings],  # only optimize the embeddings
            lr=embedding_learning_rate,
        )

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        latents_dtype = text_embeddings.dtype
        image = image.to(device=self.device, dtype=latents_dtype)
        init_latent_image_dist = self.vae.encode(image).latent_dist
        image_latents = init_latent_image_dist.sample(generator=generator)
        image_latents = 0.18215 * image_latents

        progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
        progress_bar.set_description("Steps")

        global_step = 0

        logger.info("First optimizing the text embedding to better reconstruct the init image")
        for _ in range(text_embedding_optimization_steps):
            with accelerator.accumulate(text_embeddings):
                # Sample noise that we'll add to the latents
                noise = torch.randn(image_latents.shape).to(image_latents.device)
                timesteps = torch.randint(1000, (1,), device=image_latents.device)

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)

                # Predict the noise residual
                noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample

                loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
                accelerator.backward(loss)

                optimizer.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

            logs = {"loss": loss.detach().item()}  # , "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

        accelerator.wait_for_everyone()

        text_embeddings.requires_grad_(False)

        # Now we fine tune the unet to better reconstruct the image
        self.unet.requires_grad_(True)
        self.unet.train()
        optimizer = torch.optim.Adam(
            self.unet.parameters(),  # only optimize unet
            lr=diffusion_model_learning_rate,
        )
        progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)

        logger.info("Next fine tuning the entire model to better reconstruct the init image")
        for _ in range(model_fine_tuning_optimization_steps):
            with accelerator.accumulate(self.unet.parameters()):
                # Sample noise that we'll add to the latents
                noise = torch.randn(image_latents.shape).to(image_latents.device)
                timesteps = torch.randint(1000, (1,), device=image_latents.device)

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)

                # Predict the noise residual
                noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample

                loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
                accelerator.backward(loss)

                optimizer.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

            logs = {"loss": loss.detach().item()}  # , "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

        accelerator.wait_for_everyone()
        self.text_embeddings_orig = text_embeddings_orig
        self.text_embeddings = text_embeddings

    @torch.no_grad()
    def __call__(
        self,
        alpha: float = 1.2,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        guidance_scale: float = 7.5,
        eta: float = 0.0,
    ):
        r"""
        Function invoked when calling the pipeline for generation.
        Args:
            alpha (`float`, *optional*, defaults to 1.2):
                The interpolation factor between the original and optimized text embeddings. A value closer to 0
                will resemble the original input image.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if self.text_embeddings is None:
            raise ValueError("Please run the pipe.train() before trying to generate an image.")
        if self.text_embeddings_orig is None:
            raise ValueError("Please run the pipe.train() before trying to generate an image.")

        text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens = [""]
            max_length = self.tokenizer.model_max_length
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (1, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if self.device.type == "mps":
            # randn does not exist on mps
            latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                self.device
            )
        else:
            latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
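A hedged usage sketch for the pipeline above, not part of the uploaded file: it assumes the script is resolvable as the `imagic_stable_diffusion` community pipeline, that a Stable Diffusion 1.x checkpoint such as `CompVis/stable-diffusion-v1-4` is available, and that a local `./image.jpg` exists; the prompt, scheduler settings, and `alpha` value are illustrative only.

import PIL.Image
from diffusers import DDIMScheduler, DiffusionPipeline

# load any SD 1.x checkpoint and attach the community pipeline defined above
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="imagic_stable_diffusion",
    scheduler=DDIMScheduler(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        clip_sample=False,
        set_alpha_to_one=False,
    ),
).to("cuda")

init_image = PIL.Image.open("image.jpg").convert("RGB")  # placeholder path for the image to edit

# stage 1 optimizes the text embedding, stage 2 fine-tunes the UNet on this single image
pipe.train("A photo of a bird spreading its wings", image=init_image)

# sample with the interpolated embedding; larger alpha leans toward the target prompt
result = pipe(alpha=1.2, guidance_scale=7.5, num_inference_steps=50)
result.images[0].save("imagic_edit.png")

`train()` is the slow part; once it has run, `alpha` only interpolates between the original target-prompt embedding and the optimized one, so several edit strengths can be sampled from a single `train()` call.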
v0.27.0/img2img_inpainting.py
ADDED
@@ -0,0 +1,437 @@
| 1 |
+
import inspect
|
| 2 |
+
from typing import Callable, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import PIL.Image
|
| 6 |
+
import torch
|
| 7 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 8 |
+
|
| 9 |
+
from diffusers import DiffusionPipeline
|
| 10 |
+
from diffusers.configuration_utils import FrozenDict
|
| 11 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 12 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 13 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 14 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 15 |
+
from diffusers.utils import deprecate, logging
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def prepare_mask_and_masked_image(image, mask):
|
| 22 |
+
image = np.array(image.convert("RGB"))
|
| 23 |
+
image = image[None].transpose(0, 3, 1, 2)
|
| 24 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 25 |
+
|
| 26 |
+
mask = np.array(mask.convert("L"))
|
| 27 |
+
mask = mask.astype(np.float32) / 255.0
|
| 28 |
+
mask = mask[None, None]
|
| 29 |
+
mask[mask < 0.5] = 0
|
| 30 |
+
mask[mask >= 0.5] = 1
|
| 31 |
+
mask = torch.from_numpy(mask)
|
| 32 |
+
|
| 33 |
+
masked_image = image * (mask < 0.5)
|
| 34 |
+
|
| 35 |
+
return mask, masked_image
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def check_size(image, height, width):
|
| 39 |
+
if isinstance(image, PIL.Image.Image):
|
| 40 |
+
w, h = image.size
|
| 41 |
+
elif isinstance(image, torch.Tensor):
|
| 42 |
+
*_, h, w = image.shape
|
| 43 |
+
|
| 44 |
+
if h != height or w != width:
|
| 45 |
+
raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def overlay_inner_image(image, inner_image, paste_offset: Tuple[int] = (0, 0)):
|
| 49 |
+
inner_image = inner_image.convert("RGBA")
|
| 50 |
+
image = image.convert("RGB")
|
| 51 |
+
|
| 52 |
+
image.paste(inner_image, paste_offset, inner_image)
|
| 53 |
+
image = image.convert("RGB")
|
| 54 |
+
|
| 55 |
+
return image
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class ImageToImageInpaintingPipeline(DiffusionPipeline):
|
| 59 |
+
r"""
|
| 60 |
+
Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
|
| 61 |
+
|
| 62 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 63 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 64 |
+
|
| 65 |
+
Args:
|
| 66 |
+
vae ([`AutoencoderKL`]):
|
| 67 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 68 |
+
text_encoder ([`CLIPTextModel`]):
|
| 69 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 70 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 71 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 72 |
+
tokenizer (`CLIPTokenizer`):
|
| 73 |
+
Tokenizer of class
|
| 74 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 75 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 76 |
+
scheduler ([`SchedulerMixin`]):
|
| 77 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
|
| 78 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 79 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 80 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 81 |
+
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
| 82 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 83 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
def __init__(
|
| 87 |
+
self,
|
| 88 |
+
vae: AutoencoderKL,
|
| 89 |
+
text_encoder: CLIPTextModel,
|
| 90 |
+
tokenizer: CLIPTokenizer,
|
| 91 |
+
unet: UNet2DConditionModel,
|
| 92 |
+
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
| 93 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 94 |
+
feature_extractor: CLIPImageProcessor,
|
| 95 |
+
):
|
| 96 |
+
super().__init__()
|
| 97 |
+
|
| 98 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 99 |
+
deprecation_message = (
|
| 100 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 101 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 102 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 103 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 104 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 105 |
+
" file"
|
| 106 |
+
)
|
| 107 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 108 |
+
new_config = dict(scheduler.config)
|
| 109 |
+
new_config["steps_offset"] = 1
|
| 110 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 111 |
+
|
| 112 |
+
if safety_checker is None:
|
| 113 |
+
logger.warning(
|
| 114 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 115 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 116 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 117 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 118 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 119 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
self.register_modules(
|
| 123 |
+
vae=vae,
|
| 124 |
+
text_encoder=text_encoder,
|
| 125 |
+
tokenizer=tokenizer,
|
| 126 |
+
unet=unet,
|
| 127 |
+
scheduler=scheduler,
|
| 128 |
+
safety_checker=safety_checker,
|
| 129 |
+
feature_extractor=feature_extractor,
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
@torch.no_grad()
|
| 133 |
+
def __call__(
|
| 134 |
+
self,
|
| 135 |
+
prompt: Union[str, List[str]],
|
| 136 |
+
image: Union[torch.FloatTensor, PIL.Image.Image],
|
| 137 |
+
inner_image: Union[torch.FloatTensor, PIL.Image.Image],
|
| 138 |
+
mask_image: Union[torch.FloatTensor, PIL.Image.Image],
|
| 139 |
+
height: int = 512,
|
| 140 |
+
width: int = 512,
|
| 141 |
+
num_inference_steps: int = 50,
|
| 142 |
+
guidance_scale: float = 7.5,
|
| 143 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 144 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 145 |
+
eta: float = 0.0,
|
| 146 |
+
generator: Optional[torch.Generator] = None,
|
| 147 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 148 |
+
output_type: Optional[str] = "pil",
|
| 149 |
+
return_dict: bool = True,
|
| 150 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 151 |
+
callback_steps: int = 1,
|
| 152 |
+
**kwargs,
|
| 153 |
+
):
|
| 154 |
+
r"""
|
| 155 |
+
Function invoked when calling the pipeline for generation.
|
| 156 |
+
|
| 157 |
+
Args:
|
| 158 |
+
prompt (`str` or `List[str]`):
|
| 159 |
+
The prompt or prompts to guide the image generation.
|
| 160 |
+
image (`torch.Tensor` or `PIL.Image.Image`):
|
| 161 |
+
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
| 162 |
+
be masked out with `mask_image` and repainted according to `prompt`.
|
| 163 |
+
inner_image (`torch.Tensor` or `PIL.Image.Image`):
|
| 164 |
+
`Image`, or tensor representing an image batch which will be overlayed onto `image`. Non-transparent
|
| 165 |
+
regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
|
| 166 |
+
the last channel representing the alpha channel, which will be used to blend `inner_image` with
|
| 167 |
+
`image`. If not provided, it will be forcibly cast to RGBA.
|
| 168 |
+
mask_image (`PIL.Image.Image`):
|
| 169 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 170 |
+
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
| 171 |
+
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
| 172 |
+
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 173 |
+
height (`int`, *optional*, defaults to 512):
|
| 174 |
+
The height in pixels of the generated image.
|
| 175 |
+
width (`int`, *optional*, defaults to 512):
|
| 176 |
+
The width in pixels of the generated image.
|
| 177 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 178 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 179 |
+
expense of slower inference.
|
| 180 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 181 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 182 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 183 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 184 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 185 |
+
usually at the expense of lower image quality.
|
| 186 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 187 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 188 |
+
if `guidance_scale` is less than `1`).
|
| 189 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 190 |
+
The number of images to generate per prompt.
|
| 191 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 192 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 193 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 194 |
+
generator (`torch.Generator`, *optional*):
|
| 195 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 196 |
+
deterministic.
|
| 197 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 198 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 199 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 200 |
+
tensor will ge generated by sampling using the supplied random `generator`.
|
| 201 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 202 |
+
The output format of the generate image. Choose between
|
| 203 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 204 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 205 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 206 |
+
plain tuple.
|
| 207 |
+
callback (`Callable`, *optional*):
|
| 208 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 209 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 210 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 211 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 212 |
+
called at every step.
|
| 213 |
+
|
| 214 |
+
Returns:
|
| 215 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 216 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 217 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 218 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 219 |
+
(nsfw) content, according to the `safety_checker`.
|
| 220 |
+
"""
|
| 221 |
+
|
| 222 |
+
if isinstance(prompt, str):
|
| 223 |
+
batch_size = 1
|
| 224 |
+
elif isinstance(prompt, list):
|
| 225 |
+
batch_size = len(prompt)
|
| 226 |
+
else:
|
| 227 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 228 |
+
|
| 229 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 230 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 231 |
+
|
| 232 |
+
if (callback_steps is None) or (
|
| 233 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 234 |
+
):
|
| 235 |
+
raise ValueError(
|
| 236 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 237 |
+
f" {type(callback_steps)}."
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
# check if input sizes are correct
|
| 241 |
+
check_size(image, height, width)
|
| 242 |
+
check_size(inner_image, height, width)
|
| 243 |
+
check_size(mask_image, height, width)
|
| 244 |
+
|
| 245 |
+
# get prompt text embeddings
|
| 246 |
+
text_inputs = self.tokenizer(
|
| 247 |
+
prompt,
|
| 248 |
+
padding="max_length",
|
| 249 |
+
max_length=self.tokenizer.model_max_length,
|
| 250 |
+
return_tensors="pt",
|
| 251 |
+
)
|
| 252 |
+
text_input_ids = text_inputs.input_ids
|
| 253 |
+
|
| 254 |
+
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
|
| 255 |
+
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
|
| 256 |
+
logger.warning(
|
| 257 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 258 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 259 |
+
)
|
| 260 |
+
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
|
| 261 |
+
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
|
| 262 |
+
|
| 263 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 264 |
+
bs_embed, seq_len, _ = text_embeddings.shape
|
| 265 |
+
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 266 |
+
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 267 |
+
|
| 268 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 269 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 270 |
+
# corresponds to doing no classifier free guidance.
|
| 271 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 272 |
+
# get unconditional embeddings for classifier free guidance
|
| 273 |
+
if do_classifier_free_guidance:
|
| 274 |
+
uncond_tokens: List[str]
|
| 275 |
+
if negative_prompt is None:
|
| 276 |
+
uncond_tokens = [""]
|
| 277 |
+
elif type(prompt) is not type(negative_prompt):
|
| 278 |
+
raise TypeError(
|
| 279 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 280 |
+
f" {type(prompt)}."
|
| 281 |
+
)
|
| 282 |
+
elif isinstance(negative_prompt, str):
|
| 283 |
+
uncond_tokens = [negative_prompt]
|
| 284 |
+
elif batch_size != len(negative_prompt):
|
| 285 |
+
raise ValueError(
|
| 286 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 287 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 288 |
+
" the batch size of `prompt`."
|
| 289 |
+
)
|
| 290 |
+
else:
|
| 291 |
+
uncond_tokens = negative_prompt
|
| 292 |
+
|
| 293 |
+
max_length = text_input_ids.shape[-1]
|
| 294 |
+
uncond_input = self.tokenizer(
|
| 295 |
+
uncond_tokens,
|
| 296 |
+
padding="max_length",
|
| 297 |
+
max_length=max_length,
|
| 298 |
+
truncation=True,
|
| 299 |
+
return_tensors="pt",
|
| 300 |
+
)
|
| 301 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 302 |
+
|
| 303 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 304 |
+
seq_len = uncond_embeddings.shape[1]
|
| 305 |
+
uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
|
| 306 |
+
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 307 |
+
|
| 308 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 309 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 310 |
+
# to avoid doing two forward passes
|
| 311 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 312 |
+
|
| 313 |
+
# get the initial random noise unless the user supplied it
|
| 314 |
+
# Unlike in other pipelines, latents need to be generated in the target device
|
| 315 |
+
# for 1-to-1 results reproducibility with the CompVis implementation.
|
| 316 |
+
# However this currently doesn't work in `mps`.
|
| 317 |
+
num_channels_latents = self.vae.config.latent_channels
|
| 318 |
+
latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
|
| 319 |
+
latents_dtype = text_embeddings.dtype
|
| 320 |
+
if latents is None:
|
| 321 |
+
if self.device.type == "mps":
|
| 322 |
+
# randn does not exist on mps
|
| 323 |
+
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
| 324 |
+
self.device
|
| 325 |
+
)
|
| 326 |
+
else:
|
| 327 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
| 328 |
+
else:
|
| 329 |
+
if latents.shape != latents_shape:
|
| 330 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
| 331 |
+
latents = latents.to(self.device)
|
| 332 |
+
|
| 333 |
+
# overlay the inner image
|
| 334 |
+
image = overlay_inner_image(image, inner_image)
|
| 335 |
+
|
| 336 |
+
# prepare mask and masked_image
|
| 337 |
+
mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
|
| 338 |
+
mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
|
| 339 |
+
masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
|
| 340 |
+
|
| 341 |
+
# resize the mask to latents shape as we concatenate the mask to the latents
|
| 342 |
+
mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
|
| 343 |
+
|
| 344 |
+
# encode the mask image into latents space so we can concatenate it to the latents
|
| 345 |
+
masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
|
| 346 |
+
masked_image_latents = 0.18215 * masked_image_latents
|
| 347 |
+
|
| 348 |
+
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
|
| 349 |
+
mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
|
| 350 |
+
masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
|
| 351 |
+
|
| 352 |
+
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
|
| 353 |
+
masked_image_latents = (
|
| 354 |
+
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
num_channels_mask = mask.shape[1]
|
| 358 |
+
num_channels_masked_image = masked_image_latents.shape[1]
|
| 359 |
+
|
| 360 |
+
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
|
| 361 |
+
raise ValueError(
|
| 362 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 363 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 364 |
+
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
| 365 |
+
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
|
| 366 |
+
" `pipeline.unet` or your `mask_image` or `image` input."
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
# set timesteps
|
| 370 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 371 |
+
|
| 372 |
+
# Some schedulers like PNDM have timesteps as arrays
|
| 373 |
+
# It's more optimized to move all timesteps to correct device beforehand
|
| 374 |
+
timesteps_tensor = self.scheduler.timesteps.to(self.device)
|
| 375 |
+
|
| 376 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 377 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 378 |
+
|
| 379 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 380 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 381 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 382 |
+
# and should be between [0, 1]
|
| 383 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 384 |
+
extra_step_kwargs = {}
|
| 385 |
+
if accepts_eta:
|
| 386 |
+
extra_step_kwargs["eta"] = eta
|
| 387 |
+
|
| 388 |
+
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
|
| 389 |
+
# expand the latents if we are doing classifier free guidance
|
| 390 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 391 |
+
|
| 392 |
+
# concat latents, mask, masked_image_latents in the channel dimension
|
| 393 |
+
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
|
| 394 |
+
|
| 395 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 396 |
+
|
| 397 |
+
# predict the noise residual
|
| 398 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
| 399 |
+
|
| 400 |
+
# perform guidance
|
| 401 |
+
if do_classifier_free_guidance:
|
| 402 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 403 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 404 |
+
|
| 405 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 406 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 407 |
+
|
| 408 |
+
# call the callback, if provided
|
| 409 |
+
if callback is not None and i % callback_steps == 0:
|
| 410 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 411 |
+
callback(step_idx, t, latents)
|
| 412 |
+
|
| 413 |
+
latents = 1 / 0.18215 * latents
|
| 414 |
+
image = self.vae.decode(latents).sample
|
| 415 |
+
|
| 416 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 417 |
+
|
| 418 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 419 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 420 |
+
|
| 421 |
+
if self.safety_checker is not None:
|
| 422 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
|
| 423 |
+
self.device
|
| 424 |
+
)
|
| 425 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 426 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
|
| 427 |
+
)
|
| 428 |
+
else:
|
| 429 |
+
has_nsfw_concept = None
|
| 430 |
+
|
| 431 |
+
if output_type == "pil":
|
| 432 |
+
image = self.numpy_to_pil(image)
|
| 433 |
+
|
| 434 |
+
if not return_dict:
|
| 435 |
+
return (image, has_nsfw_concept)
|
| 436 |
+
|
| 437 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
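The channel check at the top of this excerpt guards the concatenation performed inside the loop: every step the UNet receives the noisy latents, the downsampled mask, and the masked-image latents stacked on the channel axis, so their channel counts must add up to `unet.config.in_channels`. A minimal worked sketch of that arithmetic follows; the concrete numbers are the usual Stable Diffusion inpainting values and are assumptions, not read from a loaded config.

# Worked example of the channel bookkeeping enforced above (values assumed,
# not read from an actual config).
num_channels_latents = 4        # VAE latent channels
num_channels_mask = 1           # single-channel mask at latent resolution
num_channels_masked_image = 4   # VAE encoding of the masked input image
unet_in_channels = 9            # typical inpainting UNet input width
assert num_channels_latents + num_channels_mask + num_channels_masked_image == unet_in_channels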
|
v0.27.0/instaflow_one_step.py
ADDED
|
@@ -0,0 +1,680 @@
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
from packaging import version
|
| 20 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 21 |
+
|
| 22 |
+
from diffusers.configuration_utils import FrozenDict
|
| 23 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 24 |
+
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 25 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 26 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 27 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 28 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 29 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 30 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 31 |
+
from diffusers.utils import (
|
| 32 |
+
deprecate,
|
| 33 |
+
logging,
|
| 34 |
+
)
|
| 35 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 42 |
+
"""
|
| 43 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 44 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 45 |
+
"""
|
| 46 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 47 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 48 |
+
# rescale the results from guidance (fixes overexposure)
|
| 49 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 50 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 51 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 52 |
+
return noise_cfg
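rescale_noise_cfg only adjusts statistics: it scales the guided prediction so its per-sample standard deviation matches the text-conditional branch, then blends that rescaled tensor back with the original CFG output using `guidance_rescale`. A small self-contained sketch on random tensors (shapes arbitrary, chosen only for illustration) is below.

import torch

# Stand-in tensors in the usual (batch, channels, height, width) layout.
noise_pred_uncond = torch.randn(2, 4, 64, 64)
noise_pred_text = torch.randn(2, 4, 64, 64)
guidance_scale = 7.5

# Classifier-free guidance combination followed by the rescale defined above.
noise_cfg = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)

# guidance_rescale=0.0 returns noise_cfg unchanged; 1.0 matches the std of the
# text branch exactly; 0.7 sits in between.
print(rescaled.std(dim=(1, 2, 3)), noise_pred_text.std(dim=(1, 2, 3)))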
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class InstaFlowPipeline(
|
| 56 |
+
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
|
| 57 |
+
):
|
| 58 |
+
r"""
|
| 59 |
+
Pipeline for text-to-image generation using Rectified Flow and Euler discretization.
|
| 60 |
+
This customized pipeline is based on StableDiffusionPipeline from the official Diffusers library (0.21.4)
|
| 61 |
+
|
| 62 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 63 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 64 |
+
|
| 65 |
+
The pipeline also inherits the following loading methods:
|
| 66 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 67 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 68 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 69 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 70 |
+
|
| 71 |
+
Args:
|
| 72 |
+
vae ([`AutoencoderKL`]):
|
| 73 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 74 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 75 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 76 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 77 |
+
A `CLIPTokenizer` to tokenize text.
|
| 78 |
+
unet ([`UNet2DConditionModel`]):
|
| 79 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 80 |
+
scheduler ([`SchedulerMixin`]):
|
| 81 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 82 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 83 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 84 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 85 |
+
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
| 86 |
+
about a model's potential harms.
|
| 87 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 88 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 89 |
+
"""
|
| 90 |
+
|
| 91 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 92 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 93 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 94 |
+
|
| 95 |
+
def __init__(
|
| 96 |
+
self,
|
| 97 |
+
vae: AutoencoderKL,
|
| 98 |
+
text_encoder: CLIPTextModel,
|
| 99 |
+
tokenizer: CLIPTokenizer,
|
| 100 |
+
unet: UNet2DConditionModel,
|
| 101 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 102 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 103 |
+
feature_extractor: CLIPImageProcessor,
|
| 104 |
+
requires_safety_checker: bool = True,
|
| 105 |
+
):
|
| 106 |
+
super().__init__()
|
| 107 |
+
|
| 108 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 109 |
+
deprecation_message = (
|
| 110 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 111 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 112 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 113 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 114 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 115 |
+
" file"
|
| 116 |
+
)
|
| 117 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 118 |
+
new_config = dict(scheduler.config)
|
| 119 |
+
new_config["steps_offset"] = 1
|
| 120 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 121 |
+
|
| 122 |
+
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
| 123 |
+
deprecation_message = (
|
| 124 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 125 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 126 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 127 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 128 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 129 |
+
)
|
| 130 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 131 |
+
new_config = dict(scheduler.config)
|
| 132 |
+
new_config["clip_sample"] = False
|
| 133 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 134 |
+
|
| 135 |
+
if safety_checker is None and requires_safety_checker:
|
| 136 |
+
logger.warning(
|
| 137 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 138 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 139 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 140 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 141 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 142 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
if safety_checker is not None and feature_extractor is None:
|
| 146 |
+
raise ValueError(
|
| 147 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 148 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 152 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 153 |
+
) < version.parse("0.9.0.dev0")
|
| 154 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 155 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 156 |
+
deprecation_message = (
|
| 157 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 158 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 159 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 160 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 161 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 162 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 163 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 164 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 165 |
+
" the `unet/config.json` file"
|
| 166 |
+
)
|
| 167 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 168 |
+
new_config = dict(unet.config)
|
| 169 |
+
new_config["sample_size"] = 64
|
| 170 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 171 |
+
|
| 172 |
+
self.register_modules(
|
| 173 |
+
vae=vae,
|
| 174 |
+
text_encoder=text_encoder,
|
| 175 |
+
tokenizer=tokenizer,
|
| 176 |
+
unet=unet,
|
| 177 |
+
scheduler=scheduler,
|
| 178 |
+
safety_checker=safety_checker,
|
| 179 |
+
feature_extractor=feature_extractor,
|
| 180 |
+
)
|
| 181 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 182 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 183 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 184 |
+
|
| 185 |
+
def _encode_prompt(
|
| 186 |
+
self,
|
| 187 |
+
prompt,
|
| 188 |
+
device,
|
| 189 |
+
num_images_per_prompt,
|
| 190 |
+
do_classifier_free_guidance,
|
| 191 |
+
negative_prompt=None,
|
| 192 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 193 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 194 |
+
lora_scale: Optional[float] = None,
|
| 195 |
+
):
|
| 196 |
+
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
|
| 197 |
+
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
|
| 198 |
+
|
| 199 |
+
prompt_embeds_tuple = self.encode_prompt(
|
| 200 |
+
prompt=prompt,
|
| 201 |
+
device=device,
|
| 202 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 203 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 204 |
+
negative_prompt=negative_prompt,
|
| 205 |
+
prompt_embeds=prompt_embeds,
|
| 206 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 207 |
+
lora_scale=lora_scale,
|
| 208 |
+
)
|
| 209 |
+
|
| 210 |
+
# concatenate for backwards compatibility
|
| 211 |
+
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
|
| 212 |
+
|
| 213 |
+
return prompt_embeds
|
| 214 |
+
|
| 215 |
+
def encode_prompt(
|
| 216 |
+
self,
|
| 217 |
+
prompt,
|
| 218 |
+
device,
|
| 219 |
+
num_images_per_prompt,
|
| 220 |
+
do_classifier_free_guidance,
|
| 221 |
+
negative_prompt=None,
|
| 222 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 223 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 224 |
+
lora_scale: Optional[float] = None,
|
| 225 |
+
):
|
| 226 |
+
r"""
|
| 227 |
+
Encodes the prompt into text encoder hidden states.
|
| 228 |
+
|
| 229 |
+
Args:
|
| 230 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 231 |
+
prompt to be encoded
|
| 232 |
+
device: (`torch.device`):
|
| 233 |
+
torch device
|
| 234 |
+
num_images_per_prompt (`int`):
|
| 235 |
+
number of images that should be generated per prompt
|
| 236 |
+
do_classifier_free_guidance (`bool`):
|
| 237 |
+
whether to use classifier free guidance or not
|
| 238 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 239 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 240 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 241 |
+
less than `1`).
|
| 242 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 243 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 244 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 245 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 246 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 247 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 248 |
+
argument.
|
| 249 |
+
lora_scale (`float`, *optional*):
|
| 250 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 251 |
+
"""
|
| 252 |
+
# set lora scale so that monkey patched LoRA
|
| 253 |
+
# function of text encoder can correctly access it
|
| 254 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 255 |
+
self._lora_scale = lora_scale
|
| 256 |
+
|
| 257 |
+
# dynamically adjust the LoRA scale
|
| 258 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 259 |
+
|
| 260 |
+
if prompt is not None and isinstance(prompt, str):
|
| 261 |
+
batch_size = 1
|
| 262 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 263 |
+
batch_size = len(prompt)
|
| 264 |
+
else:
|
| 265 |
+
batch_size = prompt_embeds.shape[0]
|
| 266 |
+
|
| 267 |
+
if prompt_embeds is None:
|
| 268 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 269 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 270 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 271 |
+
|
| 272 |
+
text_inputs = self.tokenizer(
|
| 273 |
+
prompt,
|
| 274 |
+
padding="max_length",
|
| 275 |
+
max_length=self.tokenizer.model_max_length,
|
| 276 |
+
truncation=True,
|
| 277 |
+
return_tensors="pt",
|
| 278 |
+
)
|
| 279 |
+
text_input_ids = text_inputs.input_ids
|
| 280 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 281 |
+
|
| 282 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 283 |
+
text_input_ids, untruncated_ids
|
| 284 |
+
):
|
| 285 |
+
removed_text = self.tokenizer.batch_decode(
|
| 286 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 287 |
+
)
|
| 288 |
+
logger.warning(
|
| 289 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 290 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 294 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 295 |
+
else:
|
| 296 |
+
attention_mask = None
|
| 297 |
+
|
| 298 |
+
prompt_embeds = self.text_encoder(
|
| 299 |
+
text_input_ids.to(device),
|
| 300 |
+
attention_mask=attention_mask,
|
| 301 |
+
)
|
| 302 |
+
prompt_embeds = prompt_embeds[0]
|
| 303 |
+
|
| 304 |
+
if self.text_encoder is not None:
|
| 305 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 306 |
+
elif self.unet is not None:
|
| 307 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 308 |
+
else:
|
| 309 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 310 |
+
|
| 311 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 312 |
+
|
| 313 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 314 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 315 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 316 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 317 |
+
|
| 318 |
+
# get unconditional embeddings for classifier free guidance
|
| 319 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 320 |
+
uncond_tokens: List[str]
|
| 321 |
+
if negative_prompt is None:
|
| 322 |
+
uncond_tokens = [""] * batch_size
|
| 323 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 324 |
+
raise TypeError(
|
| 325 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 326 |
+
f" {type(prompt)}."
|
| 327 |
+
)
|
| 328 |
+
elif isinstance(negative_prompt, str):
|
| 329 |
+
uncond_tokens = [negative_prompt]
|
| 330 |
+
elif batch_size != len(negative_prompt):
|
| 331 |
+
raise ValueError(
|
| 332 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 333 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 334 |
+
" the batch size of `prompt`."
|
| 335 |
+
)
|
| 336 |
+
else:
|
| 337 |
+
uncond_tokens = negative_prompt
|
| 338 |
+
|
| 339 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 340 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 341 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 342 |
+
|
| 343 |
+
max_length = prompt_embeds.shape[1]
|
| 344 |
+
uncond_input = self.tokenizer(
|
| 345 |
+
uncond_tokens,
|
| 346 |
+
padding="max_length",
|
| 347 |
+
max_length=max_length,
|
| 348 |
+
truncation=True,
|
| 349 |
+
return_tensors="pt",
|
| 350 |
+
)
|
| 351 |
+
|
| 352 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 353 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 354 |
+
else:
|
| 355 |
+
attention_mask = None
|
| 356 |
+
|
| 357 |
+
negative_prompt_embeds = self.text_encoder(
|
| 358 |
+
uncond_input.input_ids.to(device),
|
| 359 |
+
attention_mask=attention_mask,
|
| 360 |
+
)
|
| 361 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 362 |
+
|
| 363 |
+
if do_classifier_free_guidance:
|
| 364 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 365 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 366 |
+
|
| 367 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 368 |
+
|
| 369 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 370 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 371 |
+
|
| 372 |
+
return prompt_embeds, negative_prompt_embeds
|
| 373 |
+
|
| 374 |
+
def run_safety_checker(self, image, device, dtype):
|
| 375 |
+
if self.safety_checker is None:
|
| 376 |
+
has_nsfw_concept = None
|
| 377 |
+
else:
|
| 378 |
+
if torch.is_tensor(image):
|
| 379 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 380 |
+
else:
|
| 381 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 382 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 383 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 384 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 385 |
+
)
|
| 386 |
+
return image, has_nsfw_concept
|
| 387 |
+
|
| 388 |
+
def decode_latents(self, latents):
|
| 389 |
+
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
|
| 390 |
+
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
|
| 391 |
+
|
| 392 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 393 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 394 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 395 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 396 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 397 |
+
return image
|
| 398 |
+
|
| 399 |
+
def merge_dW_to_unet(pipe, dW_dict, alpha=1.0):
|
| 400 |
+
_tmp_sd = pipe.unet.state_dict()
|
| 401 |
+
for key in dW_dict.keys():
|
| 402 |
+
_tmp_sd[key] += dW_dict[key] * alpha
|
| 403 |
+
pipe.unet.load_state_dict(_tmp_sd, strict=False)
|
| 404 |
+
return pipe
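merge_dW_to_unet adds a dictionary of weight deltas onto the matching entries of the UNet state dict, scaled by `alpha`. A hedged usage sketch follows; it assumes `pipe` is an already-loaded InstaFlowPipeline and that the deltas live in a hypothetical `dW_weights.pt` file whose keys match `pipe.unet.state_dict()`.

import torch

# Hypothetical delta-weight file: {parameter_name: delta_tensor}. Cast the deltas
# to the UNet's dtype/device so the in-place addition inside merge_dW_to_unet works.
raw = torch.load("dW_weights.pt", map_location="cpu")
dW_dict = {k: v.to(device=pipe.unet.device, dtype=pipe.unet.dtype) for k, v in raw.items()}
pipe = pipe.merge_dW_to_unet(dW_dict, alpha=1.0)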
|
| 405 |
+
|
| 406 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 407 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 408 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 409 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 410 |
+
# and should be between [0, 1]
|
| 411 |
+
|
| 412 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 413 |
+
extra_step_kwargs = {}
|
| 414 |
+
if accepts_eta:
|
| 415 |
+
extra_step_kwargs["eta"] = eta
|
| 416 |
+
|
| 417 |
+
# check if the scheduler accepts generator
|
| 418 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 419 |
+
if accepts_generator:
|
| 420 |
+
extra_step_kwargs["generator"] = generator
|
| 421 |
+
return extra_step_kwargs
|
| 422 |
+
|
| 423 |
+
def check_inputs(
|
| 424 |
+
self,
|
| 425 |
+
prompt,
|
| 426 |
+
height,
|
| 427 |
+
width,
|
| 428 |
+
callback_steps,
|
| 429 |
+
negative_prompt=None,
|
| 430 |
+
prompt_embeds=None,
|
| 431 |
+
negative_prompt_embeds=None,
|
| 432 |
+
):
|
| 433 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 434 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 435 |
+
|
| 436 |
+
if (callback_steps is None) or (
|
| 437 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 438 |
+
):
|
| 439 |
+
raise ValueError(
|
| 440 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 441 |
+
f" {type(callback_steps)}."
|
| 442 |
+
)
|
| 443 |
+
|
| 444 |
+
if prompt is not None and prompt_embeds is not None:
|
| 445 |
+
raise ValueError(
|
| 446 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 447 |
+
" only forward one of the two."
|
| 448 |
+
)
|
| 449 |
+
elif prompt is None and prompt_embeds is None:
|
| 450 |
+
raise ValueError(
|
| 451 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 452 |
+
)
|
| 453 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 454 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 455 |
+
|
| 456 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 457 |
+
raise ValueError(
|
| 458 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 459 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 460 |
+
)
|
| 461 |
+
|
| 462 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 463 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 464 |
+
raise ValueError(
|
| 465 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 466 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 467 |
+
f" {negative_prompt_embeds.shape}."
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 471 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 472 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 473 |
+
raise ValueError(
|
| 474 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 475 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
if latents is None:
|
| 479 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 480 |
+
else:
|
| 481 |
+
latents = latents.to(device)
|
| 482 |
+
|
| 483 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 484 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 485 |
+
return latents
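prepare_latents either draws fresh Gaussian noise with randn_tensor or reuses user-supplied latents, then scales by the scheduler's init_noise_sigma. The shape arithmetic is worth spelling out once: with the usual Stable Diffusion values (4 latent channels, VAE downsampling factor 8), a single 512x512 image maps to latents of shape (1, 4, 64, 64), as the sketch below makes explicit.

# Worked example of the latent shape computed above, assuming the standard
# Stable Diffusion configuration (these numbers are assumptions for illustration).
batch_size, num_channels_latents = 1, 4
height = width = 512
vae_scale_factor = 8
shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
print(shape)  # (1, 4, 64, 64)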
|
| 486 |
+
|
| 487 |
+
@torch.no_grad()
|
| 488 |
+
def __call__(
|
| 489 |
+
self,
|
| 490 |
+
prompt: Union[str, List[str]] = None,
|
| 491 |
+
height: Optional[int] = None,
|
| 492 |
+
width: Optional[int] = None,
|
| 493 |
+
num_inference_steps: int = 50,
|
| 494 |
+
guidance_scale: float = 7.5,
|
| 495 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 496 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 497 |
+
eta: float = 0.0,
|
| 498 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 499 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 500 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 501 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 502 |
+
output_type: Optional[str] = "pil",
|
| 503 |
+
return_dict: bool = True,
|
| 504 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 505 |
+
callback_steps: int = 1,
|
| 506 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 507 |
+
guidance_rescale: float = 0.0,
|
| 508 |
+
):
|
| 509 |
+
r"""
|
| 510 |
+
The call function to the pipeline for generation.
|
| 511 |
+
|
| 512 |
+
Args:
|
| 513 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 514 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 515 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 516 |
+
The height in pixels of the generated image.
|
| 517 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 518 |
+
The width in pixels of the generated image.
|
| 519 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 520 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 521 |
+
expense of slower inference.
|
| 522 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 523 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 524 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 525 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 526 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 527 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 528 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 529 |
+
The number of images to generate per prompt.
|
| 530 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 531 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 532 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 533 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 534 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 535 |
+
generation deterministic.
|
| 536 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 537 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 538 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 539 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 540 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 541 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 542 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 543 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 544 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 545 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 546 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 547 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 548 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 549 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 550 |
+
plain tuple.
|
| 551 |
+
callback (`Callable`, *optional*):
|
| 552 |
+
A function that is called every `callback_steps` steps during inference. The function is called with the
|
| 553 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 554 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 555 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 556 |
+
every step.
|
| 557 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 558 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 559 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 560 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 561 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 562 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 563 |
+
using zero terminal SNR.
|
| 564 |
+
|
| 565 |
+
Examples:
|
| 566 |
+
|
| 567 |
+
Returns:
|
| 568 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 569 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 570 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 571 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 572 |
+
"not-safe-for-work" (nsfw) content.
|
| 573 |
+
"""
|
| 574 |
+
# 0. Default height and width to unet
|
| 575 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 576 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 577 |
+
|
| 578 |
+
# 1. Check inputs. Raise error if not correct
|
| 579 |
+
self.check_inputs(
|
| 580 |
+
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
| 581 |
+
)
|
| 582 |
+
|
| 583 |
+
# 2. Define call parameters
|
| 584 |
+
if prompt is not None and isinstance(prompt, str):
|
| 585 |
+
batch_size = 1
|
| 586 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 587 |
+
batch_size = len(prompt)
|
| 588 |
+
else:
|
| 589 |
+
batch_size = prompt_embeds.shape[0]
|
| 590 |
+
|
| 591 |
+
device = self._execution_device
|
| 592 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 593 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 594 |
+
# corresponds to doing no classifier free guidance.
|
| 595 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 596 |
+
|
| 597 |
+
# 3. Encode input prompt
|
| 598 |
+
text_encoder_lora_scale = (
|
| 599 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 600 |
+
)
|
| 601 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 602 |
+
prompt,
|
| 603 |
+
device,
|
| 604 |
+
num_images_per_prompt,
|
| 605 |
+
do_classifier_free_guidance,
|
| 606 |
+
negative_prompt,
|
| 607 |
+
prompt_embeds=prompt_embeds,
|
| 608 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 609 |
+
lora_scale=text_encoder_lora_scale,
|
| 610 |
+
)
|
| 611 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 612 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 613 |
+
# to avoid doing two forward passes
|
| 614 |
+
if do_classifier_free_guidance:
|
| 615 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 616 |
+
|
| 617 |
+
# 4. Prepare timesteps
|
| 618 |
+
timesteps = [(1.0 - i / num_inference_steps) * 1000.0 for i in range(num_inference_steps)]
|
| 619 |
+
|
| 620 |
+
# 5. Prepare latent variables
|
| 621 |
+
num_channels_latents = self.unet.config.in_channels
|
| 622 |
+
latents = self.prepare_latents(
|
| 623 |
+
batch_size * num_images_per_prompt,
|
| 624 |
+
num_channels_latents,
|
| 625 |
+
height,
|
| 626 |
+
width,
|
| 627 |
+
prompt_embeds.dtype,
|
| 628 |
+
device,
|
| 629 |
+
generator,
|
| 630 |
+
latents,
|
| 631 |
+
)
|
| 632 |
+
|
| 633 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 634 |
+
dt = 1.0 / num_inference_steps
|
| 635 |
+
|
| 636 |
+
# 7. Denoising loop of Euler discretization from t = 0 to t = 1
|
| 637 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 638 |
+
for i, t in enumerate(timesteps):
|
| 639 |
+
# expand the latents if we are doing classifier free guidance
|
| 640 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 641 |
+
|
| 642 |
+
vec_t = torch.ones((latent_model_input.shape[0],), device=latents.device) * t
|
| 643 |
+
|
| 644 |
+
v_pred = self.unet(latent_model_input, vec_t, encoder_hidden_states=prompt_embeds).sample
|
| 645 |
+
|
| 646 |
+
# perform guidance
|
| 647 |
+
if do_classifier_free_guidance:
|
| 648 |
+
v_pred_neg, v_pred_text = v_pred.chunk(2)
|
| 649 |
+
v_pred = v_pred_neg + guidance_scale * (v_pred_text - v_pred_neg)
|
| 650 |
+
|
| 651 |
+
latents = latents + dt * v_pred
|
| 652 |
+
|
| 653 |
+
# call the callback, if provided
|
| 654 |
+
if i == len(timesteps) - 1 or ((i + 1) % self.scheduler.order == 0):
|
| 655 |
+
progress_bar.update()
|
| 656 |
+
if callback is not None and i % callback_steps == 0:
|
| 657 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 658 |
+
callback(step_idx, t, latents)
|
| 659 |
+
|
| 660 |
+
if not output_type == "latent":
|
| 661 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 662 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 663 |
+
else:
|
| 664 |
+
image = latents
|
| 665 |
+
has_nsfw_concept = None
|
| 666 |
+
|
| 667 |
+
if has_nsfw_concept is None:
|
| 668 |
+
do_denormalize = [True] * image.shape[0]
|
| 669 |
+
else:
|
| 670 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 671 |
+
|
| 672 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 673 |
+
|
| 674 |
+
# Offload all models
|
| 675 |
+
self.maybe_free_model_hooks()
|
| 676 |
+
|
| 677 |
+
if not return_dict:
|
| 678 |
+
return (image, has_nsfw_concept)
|
| 679 |
+
|
| 680 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
v0.27.0/interpolate_stable_diffusion.py
ADDED
|
@@ -0,0 +1,498 @@
| 1 |
+
import inspect
|
| 2 |
+
import time
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Callable, List, Optional, Union
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 9 |
+
|
| 10 |
+
from diffusers.configuration_utils import FrozenDict
|
| 11 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 12 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 13 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 14 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 15 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 16 |
+
from diffusers.utils import deprecate, logging
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
|
| 23 |
+
"""helper function to spherically interpolate two arrays v1 v2"""
|
| 24 |
+
|
| 25 |
+
inputs_are_torch = False
if not isinstance(v0, np.ndarray):
|
| 26 |
+
inputs_are_torch = True
|
| 27 |
+
input_device = v0.device
|
| 28 |
+
v0 = v0.cpu().numpy()
|
| 29 |
+
v1 = v1.cpu().numpy()
|
| 30 |
+
|
| 31 |
+
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
| 32 |
+
if np.abs(dot) > DOT_THRESHOLD:
|
| 33 |
+
v2 = (1 - t) * v0 + t * v1
|
| 34 |
+
else:
|
| 35 |
+
theta_0 = np.arccos(dot)
|
| 36 |
+
sin_theta_0 = np.sin(theta_0)
|
| 37 |
+
theta_t = theta_0 * t
|
| 38 |
+
sin_theta_t = np.sin(theta_t)
|
| 39 |
+
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
| 40 |
+
s1 = sin_theta_t / sin_theta_0
|
| 41 |
+
v2 = s0 * v0 + s1 * v1
|
| 42 |
+
|
| 43 |
+
if inputs_are_torch:
|
| 44 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 45 |
+
|
| 46 |
+
return v2
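slerp walks along the great circle between the two inputs instead of a straight line, which keeps the norm of interpolated Gaussian latents roughly constant; when the inputs are nearly parallel (|dot| > DOT_THRESHOLD) it falls back to plain linear interpolation. A short sketch of interpolating between two latent tensors is below.

import torch

# Two independent Gaussian latents in the usual Stable Diffusion shape; slerp
# accepts torch tensors (or numpy arrays) and returns the same type it was given.
latents_a = torch.randn(1, 4, 64, 64)
latents_b = torch.randn(1, 4, 64, 64)

# Five evenly spaced points on the arc from latents_a to latents_b.
frames = [slerp(float(t), latents_a, latents_b) for t in torch.linspace(0.0, 1.0, 5)]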
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
|
| 50 |
+
r"""
|
| 51 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 52 |
+
|
| 53 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 54 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 55 |
+
|
| 56 |
+
Args:
|
| 57 |
+
vae ([`AutoencoderKL`]):
|
| 58 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 59 |
+
text_encoder ([`CLIPTextModel`]):
|
| 60 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 61 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 62 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 63 |
+
tokenizer (`CLIPTokenizer`):
|
| 64 |
+
Tokenizer of class
|
| 65 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 66 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 67 |
+
scheduler ([`SchedulerMixin`]):
|
| 68 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 69 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 70 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 71 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 72 |
+
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
|
| 73 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 74 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
def __init__(
|
| 78 |
+
self,
|
| 79 |
+
vae: AutoencoderKL,
|
| 80 |
+
text_encoder: CLIPTextModel,
|
| 81 |
+
tokenizer: CLIPTokenizer,
|
| 82 |
+
unet: UNet2DConditionModel,
|
| 83 |
+
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
| 84 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 85 |
+
feature_extractor: CLIPImageProcessor,
|
| 86 |
+
):
|
| 87 |
+
super().__init__()
|
| 88 |
+
|
| 89 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 90 |
+
deprecation_message = (
|
| 91 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 92 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 93 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 94 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 95 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 96 |
+
" file"
|
| 97 |
+
)
|
| 98 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 99 |
+
new_config = dict(scheduler.config)
|
| 100 |
+
new_config["steps_offset"] = 1
|
| 101 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 102 |
+
|
| 103 |
+
if safety_checker is None:
|
| 104 |
+
logger.warning(
|
| 105 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 106 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 107 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 108 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 109 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 110 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
self.register_modules(
|
| 114 |
+
vae=vae,
|
| 115 |
+
text_encoder=text_encoder,
|
| 116 |
+
tokenizer=tokenizer,
|
| 117 |
+
unet=unet,
|
| 118 |
+
scheduler=scheduler,
|
| 119 |
+
safety_checker=safety_checker,
|
| 120 |
+
feature_extractor=feature_extractor,
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
@torch.no_grad()
|
| 124 |
+
def __call__(
|
| 125 |
+
self,
|
| 126 |
+
prompt: Optional[Union[str, List[str]]] = None,
|
| 127 |
+
height: int = 512,
|
| 128 |
+
width: int = 512,
|
| 129 |
+
num_inference_steps: int = 50,
|
| 130 |
+
guidance_scale: float = 7.5,
|
| 131 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 132 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 133 |
+
eta: float = 0.0,
|
| 134 |
+
generator: Optional[torch.Generator] = None,
|
| 135 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 136 |
+
output_type: Optional[str] = "pil",
|
| 137 |
+
return_dict: bool = True,
|
| 138 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 139 |
+
callback_steps: int = 1,
|
| 140 |
+
text_embeddings: Optional[torch.FloatTensor] = None,
|
| 141 |
+
**kwargs,
|
| 142 |
+
):
|
| 143 |
+
r"""
|
| 144 |
+
Function invoked when calling the pipeline for generation.
|
| 145 |
+
|
| 146 |
+
Args:
|
| 147 |
+
prompt (`str` or `List[str]`, *optional*, defaults to `None`):
|
| 148 |
+
The prompt or prompts to guide the image generation. If not provided, `text_embeddings` is required.
|
| 149 |
+
height (`int`, *optional*, defaults to 512):
|
| 150 |
+
The height in pixels of the generated image.
|
| 151 |
+
width (`int`, *optional*, defaults to 512):
|
| 152 |
+
The width in pixels of the generated image.
|
| 153 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 154 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 155 |
+
expense of slower inference.
|
| 156 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 157 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 158 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 159 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 160 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 161 |
+
usually at the expense of lower image quality.
|
| 162 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 163 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 164 |
+
if `guidance_scale` is less than `1`).
|
| 165 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 166 |
+
The number of images to generate per prompt.
|
| 167 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 168 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 169 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 170 |
+
generator (`torch.Generator`, *optional*):
|
| 171 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 172 |
+
deterministic.
|
| 173 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 174 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 175 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 176 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 177 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 178 |
+
The output format of the generated image. Choose between
|
| 179 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 180 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 181 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 182 |
+
plain tuple.
|
| 183 |
+
callback (`Callable`, *optional*):
|
| 184 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 185 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 186 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 187 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 188 |
+
called at every step.
|
| 189 |
+
text_embeddings (`torch.FloatTensor`, *optional*, defaults to `None`):
|
| 190 |
+
Pre-generated text embeddings to be used as inputs for image generation. Can be used in place of
|
| 191 |
+
`prompt` to avoid re-computing the embeddings. If not provided, the embeddings will be generated from
|
| 192 |
+
the supplied `prompt`.
|
| 193 |
+
|
| 194 |
+
Returns:
|
| 195 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 196 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 197 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 198 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 199 |
+
(nsfw) content, according to the `safety_checker`.
|
| 200 |
+
"""
|
| 201 |
+
|
| 202 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 203 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 204 |
+
|
| 205 |
+
if (callback_steps is None) or (
|
| 206 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 207 |
+
):
|
| 208 |
+
raise ValueError(
|
| 209 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 210 |
+
f" {type(callback_steps)}."
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
if text_embeddings is None:
|
| 214 |
+
if isinstance(prompt, str):
|
| 215 |
+
batch_size = 1
|
| 216 |
+
elif isinstance(prompt, list):
|
| 217 |
+
batch_size = len(prompt)
|
| 218 |
+
else:
|
| 219 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 220 |
+
|
| 221 |
+
# get prompt text embeddings
|
| 222 |
+
text_inputs = self.tokenizer(
|
| 223 |
+
prompt,
|
| 224 |
+
padding="max_length",
|
| 225 |
+
max_length=self.tokenizer.model_max_length,
|
| 226 |
+
return_tensors="pt",
|
| 227 |
+
)
|
| 228 |
+
text_input_ids = text_inputs.input_ids
|
| 229 |
+
|
| 230 |
+
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
|
| 231 |
+
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
|
| 232 |
+
print(
|
| 233 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 234 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 235 |
+
)
|
| 236 |
+
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
|
| 237 |
+
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
|
| 238 |
+
else:
|
| 239 |
+
batch_size = text_embeddings.shape[0]
|
| 240 |
+
|
| 241 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 242 |
+
bs_embed, seq_len, _ = text_embeddings.shape
|
| 243 |
+
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 244 |
+
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 245 |
+
|
| 246 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 247 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 248 |
+
# corresponds to doing no classifier free guidance.
|
| 249 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 250 |
+
# get unconditional embeddings for classifier free guidance
|
| 251 |
+
if do_classifier_free_guidance:
|
| 252 |
+
uncond_tokens: List[str]
|
| 253 |
+
if negative_prompt is None:
|
| 254 |
+
uncond_tokens = [""] * batch_size
|
| 255 |
+
elif type(prompt) is not type(negative_prompt):
|
| 256 |
+
raise TypeError(
|
| 257 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 258 |
+
f" {type(prompt)}."
|
| 259 |
+
)
|
| 260 |
+
elif isinstance(negative_prompt, str):
|
| 261 |
+
uncond_tokens = [negative_prompt]
|
| 262 |
+
elif batch_size != len(negative_prompt):
|
| 263 |
+
raise ValueError(
|
| 264 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 265 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 266 |
+
" the batch size of `prompt`."
|
| 267 |
+
)
|
| 268 |
+
else:
|
| 269 |
+
uncond_tokens = negative_prompt
|
| 270 |
+
|
| 271 |
+
max_length = self.tokenizer.model_max_length
|
| 272 |
+
uncond_input = self.tokenizer(
|
| 273 |
+
uncond_tokens,
|
| 274 |
+
padding="max_length",
|
| 275 |
+
max_length=max_length,
|
| 276 |
+
truncation=True,
|
| 277 |
+
return_tensors="pt",
|
| 278 |
+
)
|
| 279 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 280 |
+
|
| 281 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 282 |
+
seq_len = uncond_embeddings.shape[1]
|
| 283 |
+
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 284 |
+
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 285 |
+
|
| 286 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 287 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 288 |
+
# to avoid doing two forward passes
|
| 289 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 290 |
+
|
| 291 |
+
# get the initial random noise unless the user supplied it
|
| 292 |
+
|
| 293 |
+
# Unlike in other pipelines, latents need to be generated in the target device
|
| 294 |
+
# for 1-to-1 results reproducibility with the CompVis implementation.
|
| 295 |
+
# However this currently doesn't work in `mps`.
|
| 296 |
+
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
|
| 297 |
+
latents_dtype = text_embeddings.dtype
|
| 298 |
+
if latents is None:
|
| 299 |
+
if self.device.type == "mps":
|
| 300 |
+
# randn does not work reproducibly on mps
|
| 301 |
+
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
| 302 |
+
self.device
|
| 303 |
+
)
|
| 304 |
+
else:
|
| 305 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
| 306 |
+
else:
|
| 307 |
+
if latents.shape != latents_shape:
|
| 308 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
| 309 |
+
latents = latents.to(self.device)
|
| 310 |
+
|
| 311 |
+
# set timesteps
|
| 312 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 313 |
+
|
| 314 |
+
# Some schedulers like PNDM have timesteps as arrays
|
| 315 |
+
# It's more optimized to move all timesteps to correct device beforehand
|
| 316 |
+
timesteps_tensor = self.scheduler.timesteps.to(self.device)
|
| 317 |
+
|
| 318 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 319 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 320 |
+
|
| 321 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 322 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 323 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 324 |
+
# and should be between [0, 1]
|
| 325 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 326 |
+
extra_step_kwargs = {}
|
| 327 |
+
if accepts_eta:
|
| 328 |
+
extra_step_kwargs["eta"] = eta
|
| 329 |
+
|
| 330 |
+
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
|
| 331 |
+
# expand the latents if we are doing classifier free guidance
|
| 332 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 333 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 334 |
+
|
| 335 |
+
# predict the noise residual
|
| 336 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
| 337 |
+
|
| 338 |
+
# perform guidance
|
| 339 |
+
if do_classifier_free_guidance:
|
| 340 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 341 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 342 |
+
|
| 343 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 344 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 345 |
+
|
| 346 |
+
# call the callback, if provided
|
| 347 |
+
if callback is not None and i % callback_steps == 0:
|
| 348 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 349 |
+
callback(step_idx, t, latents)
|
| 350 |
+
|
| 351 |
+
latents = 1 / 0.18215 * latents
|
| 352 |
+
image = self.vae.decode(latents).sample
|
| 353 |
+
|
| 354 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 355 |
+
|
| 356 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 357 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 358 |
+
|
| 359 |
+
if self.safety_checker is not None:
|
| 360 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
|
| 361 |
+
self.device
|
| 362 |
+
)
|
| 363 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 364 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
|
| 365 |
+
)
|
| 366 |
+
else:
|
| 367 |
+
has_nsfw_concept = None
|
| 368 |
+
|
| 369 |
+
if output_type == "pil":
|
| 370 |
+
image = self.numpy_to_pil(image)
|
| 371 |
+
|
| 372 |
+
if not return_dict:
|
| 373 |
+
return (image, has_nsfw_concept)
|
| 374 |
+
|
| 375 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
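The guidance step in the loop above uses the standard classifier-free guidance combination. As a minimal stand-alone sketch of that formula (the tensor shapes are illustrative, not taken from this pipeline):

import torch


def classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    # guidance_scale = 1 returns the text-conditioned prediction unchanged;
    # larger values push the prediction further away from the unconditional one.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


# Illustrative latent-shaped tensors: (batch, channels, height // 8, width // 8).
uncond = torch.randn(1, 4, 64, 64)
text = torch.randn(1, 4, 64, 64)
guided = classifier_free_guidance(uncond, text, guidance_scale=7.5)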
    def embed_text(self, text):
        """takes in text and turns it into text embeddings"""
        text_input = self.tokenizer(
            text,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        with torch.no_grad():
            embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
        return embed

    def get_noise(self, seed, dtype=torch.float32, height=512, width=512):
        """Takes in random seed and returns corresponding noise vector"""
        return torch.randn(
            (1, self.unet.config.in_channels, height // 8, width // 8),
            generator=torch.Generator(device=self.device).manual_seed(seed),
            device=self.device,
            dtype=dtype,
        )
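The `walk` method below interpolates the latent noise with `slerp` (defined earlier in this file) and the text embeddings with `torch.lerp`. As a rough sketch of what such a spherical-interpolation helper typically looks like (an assumption for illustration, not a copy of the helper used here):

import torch


def slerp_sketch(t, v0, v1, dot_threshold=0.9995):
    # Spherical linear interpolation between two noise tensors.
    dot = torch.sum(v0 * v1) / (torch.norm(v0) * torch.norm(v1))
    if torch.abs(dot) > dot_threshold:
        # Nearly parallel vectors: plain linear interpolation is numerically safer.
        return torch.lerp(v0, v1, t)
    theta_0 = torch.acos(dot)
    sin_theta_0 = torch.sin(theta_0)
    theta_t = theta_0 * t
    return (torch.sin(theta_0 - theta_t) / sin_theta_0) * v0 + (torch.sin(theta_t) / sin_theta_0) * v1


noise_a = torch.randn(1, 4, 64, 64)
noise_b = torch.randn(1, 4, 64, 64)
halfway = slerp_sketch(0.5, noise_a, noise_b)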
|
| 398 |
+
|
| 399 |
+
def walk(
|
| 400 |
+
self,
|
| 401 |
+
prompts: List[str],
|
| 402 |
+
seeds: List[int],
|
| 403 |
+
num_interpolation_steps: Optional[int] = 6,
|
| 404 |
+
output_dir: Optional[str] = "./dreams",
|
| 405 |
+
name: Optional[str] = None,
|
| 406 |
+
batch_size: Optional[int] = 1,
|
| 407 |
+
height: Optional[int] = 512,
|
| 408 |
+
width: Optional[int] = 512,
|
| 409 |
+
guidance_scale: Optional[float] = 7.5,
|
| 410 |
+
num_inference_steps: Optional[int] = 50,
|
| 411 |
+
eta: Optional[float] = 0.0,
|
| 412 |
+
) -> List[str]:
|
| 413 |
+
"""
|
| 414 |
+
Walks through a series of prompts and seeds, interpolating between them and saving the results to disk.
|
| 415 |
+
|
| 416 |
+
Args:
|
| 417 |
+
prompts (`List[str]`):
|
| 418 |
+
List of prompts to generate images for.
|
| 419 |
+
seeds (`List[int]`):
|
| 420 |
+
List of seeds corresponding to provided prompts. Must be the same length as prompts.
|
| 421 |
+
num_interpolation_steps (`int`, *optional*, defaults to 6):
|
| 422 |
+
Number of interpolation steps to take between prompts.
|
| 423 |
+
output_dir (`str`, *optional*, defaults to `./dreams`):
|
| 424 |
+
Directory to save the generated images to.
|
| 425 |
+
name (`str`, *optional*, defaults to `None`):
|
| 426 |
+
Subdirectory of `output_dir` to save the generated images to. If `None`, the name will
|
| 427 |
+
be the current time.
|
| 428 |
+
batch_size (`int`, *optional*, defaults to 1):
|
| 429 |
+
Number of images to generate at once.
|
| 430 |
+
height (`int`, *optional*, defaults to 512):
|
| 431 |
+
Height of the generated images.
|
| 432 |
+
width (`int`, *optional*, defaults to 512):
|
| 433 |
+
Width of the generated images.
|
| 434 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 435 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 436 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 437 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 438 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 439 |
+
usually at the expense of lower image quality.
|
| 440 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 441 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 442 |
+
expense of slower inference.
|
| 443 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 444 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 445 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 446 |
+
|
| 447 |
+
Returns:
|
| 448 |
+
`List[str]`: List of paths to the generated images.
|
| 449 |
+
"""
|
| 450 |
+
if not len(prompts) == len(seeds):
|
| 451 |
+
raise ValueError(
|
| 452 |
+
f"Number of prompts and seeds must be equalGot {len(prompts)} prompts and {len(seeds)} seeds"
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
name = name or time.strftime("%Y%m%d-%H%M%S")
|
| 456 |
+
save_path = Path(output_dir) / name
|
| 457 |
+
save_path.mkdir(exist_ok=True, parents=True)
|
| 458 |
+
|
| 459 |
+
frame_idx = 0
|
| 460 |
+
frame_filepaths = []
|
| 461 |
+
for prompt_a, prompt_b, seed_a, seed_b in zip(prompts, prompts[1:], seeds, seeds[1:]):
|
| 462 |
+
# Embed Text
|
| 463 |
+
embed_a = self.embed_text(prompt_a)
|
| 464 |
+
embed_b = self.embed_text(prompt_b)
|
| 465 |
+
|
| 466 |
+
# Get Noise
|
| 467 |
+
noise_dtype = embed_a.dtype
|
| 468 |
+
noise_a = self.get_noise(seed_a, noise_dtype, height, width)
|
| 469 |
+
noise_b = self.get_noise(seed_b, noise_dtype, height, width)
|
| 470 |
+
|
| 471 |
+
noise_batch, embeds_batch = None, None
|
| 472 |
+
T = np.linspace(0.0, 1.0, num_interpolation_steps)
|
| 473 |
+
for i, t in enumerate(T):
|
| 474 |
+
noise = slerp(float(t), noise_a, noise_b)
|
| 475 |
+
embed = torch.lerp(embed_a, embed_b, t)
|
| 476 |
+
|
| 477 |
+
noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise], dim=0)
|
| 478 |
+
embeds_batch = embed if embeds_batch is None else torch.cat([embeds_batch, embed], dim=0)
|
| 479 |
+
|
| 480 |
+
batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == T.shape[0]
|
| 481 |
+
if batch_is_ready:
|
| 482 |
+
outputs = self(
|
| 483 |
+
latents=noise_batch,
|
| 484 |
+
text_embeddings=embeds_batch,
|
| 485 |
+
height=height,
|
| 486 |
+
width=width,
|
| 487 |
+
guidance_scale=guidance_scale,
|
| 488 |
+
eta=eta,
|
| 489 |
+
num_inference_steps=num_inference_steps,
|
| 490 |
+
)
|
| 491 |
+
noise_batch, embeds_batch = None, None
|
| 492 |
+
|
| 493 |
+
for image in outputs["images"]:
|
| 494 |
+
frame_filepath = str(save_path / f"frame_{frame_idx:06d}.png")
|
| 495 |
+
image.save(frame_filepath)
|
| 496 |
+
frame_filepaths.append(frame_filepath)
|
| 497 |
+
frame_idx += 1
|
| 498 |
+
return frame_filepaths
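A minimal usage sketch for the prompt/seed interpolation implemented by `walk` (the base checkpoint, prompts and seeds below are illustrative; the pipeline is assumed to be loaded as the `interpolate_stable_diffusion` community pipeline):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",                     # illustrative base checkpoint
    custom_pipeline="interpolate_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

frame_paths = pipe.walk(
    prompts=["a photo of a corgi", "a photo of a tabby cat"],  # illustrative prompts
    seeds=[42, 1337],
    num_interpolation_steps=8,
    output_dir="./dreams",
)
print(frame_paths[0])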
|
v0.27.0/ip_adapter_face_id.py
ADDED
|
@@ -0,0 +1,1406 @@
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
import torch.nn.functional as F
|
| 21 |
+
from packaging import version
|
| 22 |
+
from safetensors import safe_open
|
| 23 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 24 |
+
|
| 25 |
+
from diffusers.configuration_utils import FrozenDict
|
| 26 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 27 |
+
from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 28 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 29 |
+
from diffusers.models.lora import LoRALinearLayer, adjust_lora_scale_text_encoder
|
| 30 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 31 |
+
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
|
| 32 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 33 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 34 |
+
from diffusers.utils import (
|
| 35 |
+
USE_PEFT_BACKEND,
|
| 36 |
+
_get_model_file,
|
| 37 |
+
deprecate,
|
| 38 |
+
logging,
|
| 39 |
+
scale_lora_layers,
|
| 40 |
+
unscale_lora_layers,
|
| 41 |
+
)
|
| 42 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class LoRAIPAdapterAttnProcessor(nn.Module):
|
| 49 |
+
r"""
|
| 50 |
+
Attention processor for IP-Adapter.
|
| 51 |
+
Args:
|
| 52 |
+
hidden_size (`int`):
|
| 53 |
+
The hidden size of the attention layer.
|
| 54 |
+
cross_attention_dim (`int`):
|
| 55 |
+
The number of channels in the `encoder_hidden_states`.
|
| 56 |
+
rank (`int`, defaults to 4):
|
| 57 |
+
The dimension of the LoRA update matrices.
|
| 58 |
+
network_alpha (`int`, *optional*):
|
| 59 |
+
Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
|
| 60 |
+
lora_scale (`float`, defaults to 1.0):
|
| 61 |
+
the weight scale of LoRA.
|
| 62 |
+
scale (`float`, defaults to 1.0):
|
| 63 |
+
the weight scale of image prompt.
|
| 64 |
+
num_tokens (`int`, defaults to 4; when using `ip_adapter_plus` it should be 16):
|
| 65 |
+
The context length of the image features.
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
def __init__(
|
| 69 |
+
self,
|
| 70 |
+
hidden_size,
|
| 71 |
+
cross_attention_dim=None,
|
| 72 |
+
rank=4,
|
| 73 |
+
network_alpha=None,
|
| 74 |
+
lora_scale=1.0,
|
| 75 |
+
scale=1.0,
|
| 76 |
+
num_tokens=4,
|
| 77 |
+
):
|
| 78 |
+
super().__init__()
|
| 79 |
+
|
| 80 |
+
self.rank = rank
|
| 81 |
+
self.lora_scale = lora_scale
|
| 82 |
+
|
| 83 |
+
self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
|
| 84 |
+
self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
|
| 85 |
+
self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
|
| 86 |
+
self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
|
| 87 |
+
|
| 88 |
+
self.hidden_size = hidden_size
|
| 89 |
+
self.cross_attention_dim = cross_attention_dim
|
| 90 |
+
self.scale = scale
|
| 91 |
+
self.num_tokens = num_tokens
|
| 92 |
+
|
| 93 |
+
self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
|
| 94 |
+
self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
|
| 95 |
+
|
| 96 |
+
def __call__(
|
| 97 |
+
self,
|
| 98 |
+
attn,
|
| 99 |
+
hidden_states,
|
| 100 |
+
encoder_hidden_states=None,
|
| 101 |
+
attention_mask=None,
|
| 102 |
+
temb=None,
|
| 103 |
+
):
|
| 104 |
+
residual = hidden_states
|
| 105 |
+
|
| 106 |
+
# separate ip_hidden_states from encoder_hidden_states
|
| 107 |
+
if encoder_hidden_states is not None:
|
| 108 |
+
if isinstance(encoder_hidden_states, tuple):
|
| 109 |
+
encoder_hidden_states, ip_hidden_states = encoder_hidden_states
|
| 110 |
+
else:
|
| 111 |
+
deprecation_message = (
|
| 112 |
+
"You have passed a tensor as `encoder_hidden_states`.This is deprecated and will be removed in a future release."
|
| 113 |
+
" Please make sure to update your script to pass `encoder_hidden_states` as a tuple to supress this warning."
|
| 114 |
+
)
|
| 115 |
+
deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False)
|
| 116 |
+
end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0]
|
| 117 |
+
encoder_hidden_states, ip_hidden_states = (
|
| 118 |
+
encoder_hidden_states[:, :end_pos, :],
|
| 119 |
+
[encoder_hidden_states[:, end_pos:, :]],
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
if attn.spatial_norm is not None:
|
| 123 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 124 |
+
|
| 125 |
+
input_ndim = hidden_states.ndim
|
| 126 |
+
|
| 127 |
+
if input_ndim == 4:
|
| 128 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 129 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 130 |
+
|
| 131 |
+
batch_size, sequence_length, _ = (
|
| 132 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 133 |
+
)
|
| 134 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 135 |
+
|
| 136 |
+
if attn.group_norm is not None:
|
| 137 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 138 |
+
|
| 139 |
+
query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
|
| 140 |
+
|
| 141 |
+
if encoder_hidden_states is None:
|
| 142 |
+
encoder_hidden_states = hidden_states
|
| 143 |
+
elif attn.norm_cross:
|
| 144 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 145 |
+
|
| 146 |
+
key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
|
| 147 |
+
value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
|
| 148 |
+
|
| 149 |
+
query = attn.head_to_batch_dim(query)
|
| 150 |
+
key = attn.head_to_batch_dim(key)
|
| 151 |
+
value = attn.head_to_batch_dim(value)
|
| 152 |
+
|
| 153 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
| 154 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 155 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 156 |
+
|
| 157 |
+
# for ip-adapter
|
| 158 |
+
ip_key = self.to_k_ip(ip_hidden_states)
|
| 159 |
+
ip_value = self.to_v_ip(ip_hidden_states)
|
| 160 |
+
|
| 161 |
+
ip_key = attn.head_to_batch_dim(ip_key)
|
| 162 |
+
ip_value = attn.head_to_batch_dim(ip_value)
|
| 163 |
+
|
| 164 |
+
ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
|
| 165 |
+
ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
|
| 166 |
+
ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
|
| 167 |
+
|
| 168 |
+
hidden_states = hidden_states + self.scale * ip_hidden_states
|
| 169 |
+
|
| 170 |
+
# linear proj
|
| 171 |
+
hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
|
| 172 |
+
# dropout
|
| 173 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 174 |
+
|
| 175 |
+
if input_ndim == 4:
|
| 176 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 177 |
+
|
| 178 |
+
if attn.residual_connection:
|
| 179 |
+
hidden_states = hidden_states + residual
|
| 180 |
+
|
| 181 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 182 |
+
|
| 183 |
+
return hidden_states
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class LoRAIPAdapterAttnProcessor2_0(nn.Module):
|
| 187 |
+
r"""
|
| 188 |
+
Attention processor for IP-Adapter for PyTorch 2.0.
|
| 189 |
+
Args:
|
| 190 |
+
hidden_size (`int`):
|
| 191 |
+
The hidden size of the attention layer.
|
| 192 |
+
cross_attention_dim (`int`):
|
| 193 |
+
The number of channels in the `encoder_hidden_states`.
|
| 194 |
+
rank (`int`, defaults to 4):
|
| 195 |
+
The dimension of the LoRA update matrices.
|
| 196 |
+
network_alpha (`int`, *optional*):
|
| 197 |
+
Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
|
| 198 |
+
lora_scale (`float`, defaults to 1.0):
|
| 199 |
+
the weight scale of LoRA.
|
| 200 |
+
scale (`float`, defaults to 1.0):
|
| 201 |
+
the weight scale of image prompt.
|
| 202 |
+
num_tokens (`int`, defaults to 4; when using `ip_adapter_plus` it should be 16):
|
| 203 |
+
The context length of the image features.
|
| 204 |
+
"""
|
| 205 |
+
|
| 206 |
+
def __init__(
|
| 207 |
+
self,
|
| 208 |
+
hidden_size,
|
| 209 |
+
cross_attention_dim=None,
|
| 210 |
+
rank=4,
|
| 211 |
+
network_alpha=None,
|
| 212 |
+
lora_scale=1.0,
|
| 213 |
+
scale=1.0,
|
| 214 |
+
num_tokens=4,
|
| 215 |
+
):
|
| 216 |
+
super().__init__()
|
| 217 |
+
|
| 218 |
+
self.rank = rank
|
| 219 |
+
self.lora_scale = lora_scale
|
| 220 |
+
|
| 221 |
+
self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
|
| 222 |
+
self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
|
| 223 |
+
self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
|
| 224 |
+
self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
|
| 225 |
+
|
| 226 |
+
self.hidden_size = hidden_size
|
| 227 |
+
self.cross_attention_dim = cross_attention_dim
|
| 228 |
+
self.scale = scale
|
| 229 |
+
self.num_tokens = num_tokens
|
| 230 |
+
|
| 231 |
+
self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
|
| 232 |
+
self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
|
| 233 |
+
|
| 234 |
+
def __call__(
|
| 235 |
+
self,
|
| 236 |
+
attn,
|
| 237 |
+
hidden_states,
|
| 238 |
+
encoder_hidden_states=None,
|
| 239 |
+
attention_mask=None,
|
| 240 |
+
temb=None,
|
| 241 |
+
):
|
| 242 |
+
residual = hidden_states
|
| 243 |
+
|
| 244 |
+
# separate ip_hidden_states from encoder_hidden_states
|
| 245 |
+
if encoder_hidden_states is not None:
|
| 246 |
+
if isinstance(encoder_hidden_states, tuple):
|
| 247 |
+
encoder_hidden_states, ip_hidden_states = encoder_hidden_states
|
| 248 |
+
else:
|
| 249 |
+
deprecation_message = (
|
| 250 |
+
"You have passed a tensor as `encoder_hidden_states`.This is deprecated and will be removed in a future release."
|
| 251 |
+
" Please make sure to update your script to pass `encoder_hidden_states` as a tuple to supress this warning."
|
| 252 |
+
)
|
| 253 |
+
deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False)
|
| 254 |
+
end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0]
|
| 255 |
+
encoder_hidden_states, ip_hidden_states = (
|
| 256 |
+
encoder_hidden_states[:, :end_pos, :],
|
| 257 |
+
[encoder_hidden_states[:, end_pos:, :]],
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
if attn.spatial_norm is not None:
|
| 261 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 262 |
+
|
| 263 |
+
input_ndim = hidden_states.ndim
|
| 264 |
+
|
| 265 |
+
if input_ndim == 4:
|
| 266 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 267 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 268 |
+
|
| 269 |
+
batch_size, sequence_length, _ = (
|
| 270 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
if attention_mask is not None:
|
| 274 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 275 |
+
# scaled_dot_product_attention expects attention_mask shape to be
|
| 276 |
+
# (batch, heads, source_length, target_length)
|
| 277 |
+
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
|
| 278 |
+
|
| 279 |
+
if attn.group_norm is not None:
|
| 280 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 281 |
+
|
| 282 |
+
query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
|
| 283 |
+
|
| 284 |
+
if encoder_hidden_states is None:
|
| 285 |
+
encoder_hidden_states = hidden_states
|
| 286 |
+
elif attn.norm_cross:
|
| 287 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 288 |
+
|
| 289 |
+
key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
|
| 290 |
+
value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
|
| 291 |
+
|
| 292 |
+
inner_dim = key.shape[-1]
|
| 293 |
+
head_dim = inner_dim // attn.heads
|
| 294 |
+
|
| 295 |
+
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 296 |
+
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 297 |
+
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 298 |
+
|
| 299 |
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
| 300 |
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
| 301 |
+
hidden_states = F.scaled_dot_product_attention(
|
| 302 |
+
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
| 306 |
+
hidden_states = hidden_states.to(query.dtype)
|
| 307 |
+
|
| 308 |
+
# for ip-adapter
|
| 309 |
+
ip_key = self.to_k_ip(ip_hidden_states)
|
| 310 |
+
ip_value = self.to_v_ip(ip_hidden_states)
|
| 311 |
+
|
| 312 |
+
ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 313 |
+
ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 314 |
+
|
| 315 |
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
| 316 |
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
| 317 |
+
ip_hidden_states = F.scaled_dot_product_attention(
|
| 318 |
+
query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
| 322 |
+
ip_hidden_states = ip_hidden_states.to(query.dtype)
|
| 323 |
+
|
| 324 |
+
hidden_states = hidden_states + self.scale * ip_hidden_states
|
| 325 |
+
|
| 326 |
+
# linear proj
|
| 327 |
+
hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
|
| 328 |
+
# dropout
|
| 329 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 330 |
+
|
| 331 |
+
if input_ndim == 4:
|
| 332 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 333 |
+
|
| 334 |
+
if attn.residual_connection:
|
| 335 |
+
hidden_states = hidden_states + residual
|
| 336 |
+
|
| 337 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 338 |
+
|
| 339 |
+
return hidden_states
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class IPAdapterFullImageProjection(nn.Module):
|
| 343 |
+
def __init__(self, image_embed_dim=1024, cross_attention_dim=1024, mult=1, num_tokens=1):
|
| 344 |
+
super().__init__()
|
| 345 |
+
from diffusers.models.attention import FeedForward
|
| 346 |
+
|
| 347 |
+
self.num_tokens = num_tokens
|
| 348 |
+
self.cross_attention_dim = cross_attention_dim
|
| 349 |
+
self.ff = FeedForward(image_embed_dim, cross_attention_dim * num_tokens, mult=mult, activation_fn="gelu")
|
| 350 |
+
self.norm = nn.LayerNorm(cross_attention_dim)
|
| 351 |
+
|
| 352 |
+
def forward(self, image_embeds: torch.FloatTensor):
|
| 353 |
+
x = self.ff(image_embeds)
|
| 354 |
+
x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
|
| 355 |
+
return self.norm(x)
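`IPAdapterFullImageProjection` turns one face-ID embedding into `num_tokens` extra cross-attention tokens consumed by the processors above. A shape-only sketch (all dimensions are illustrative):

import torch

proj = IPAdapterFullImageProjection(image_embed_dim=512, cross_attention_dim=768, mult=2, num_tokens=4)
face_embeds = torch.randn(2, 512)   # (batch, image_embed_dim), e.g. a face-recognition embedding
image_tokens = proj(face_embeds)
print(image_tokens.shape)           # torch.Size([2, 4, 768]) == (batch, num_tokens, cross_attention_dim)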
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 359 |
+
"""
|
| 360 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 361 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 362 |
+
"""
|
| 363 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 364 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 365 |
+
# rescale the results from guidance (fixes overexposure)
|
| 366 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 367 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 368 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 369 |
+
return noise_cfg
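`rescale_noise_cfg` counteracts the over-exposure that large guidance scales can cause by matching the standard deviation of the guided prediction to that of the text-conditioned prediction and then blending by `guidance_rescale`. A small sketch with random tensors (values are illustrative only):

import torch

noise_pred_text = torch.randn(1, 4, 64, 64)
noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_cfg = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)  # std grows with the scale

rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
print(noise_cfg.std().item(), rescaled.std().item())  # the rescaled std moves towards noise_pred_text.std()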
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def retrieve_timesteps(
|
| 373 |
+
scheduler,
|
| 374 |
+
num_inference_steps: Optional[int] = None,
|
| 375 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 376 |
+
timesteps: Optional[List[int]] = None,
|
| 377 |
+
**kwargs,
|
| 378 |
+
):
|
| 379 |
+
"""
|
| 380 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 381 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 382 |
+
|
| 383 |
+
Args:
|
| 384 |
+
scheduler (`SchedulerMixin`):
|
| 385 |
+
The scheduler to get timesteps from.
|
| 386 |
+
num_inference_steps (`int`):
|
| 387 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 388 |
+
`timesteps` must be `None`.
|
| 389 |
+
device (`str` or `torch.device`, *optional*):
|
| 390 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 391 |
+
timesteps (`List[int]`, *optional*):
|
| 392 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 393 |
+
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
|
| 394 |
+
must be `None`.
|
| 395 |
+
|
| 396 |
+
Returns:
|
| 397 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 398 |
+
second element is the number of inference steps.
|
| 399 |
+
"""
|
| 400 |
+
if timesteps is not None:
|
| 401 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 402 |
+
if not accepts_timesteps:
|
| 403 |
+
raise ValueError(
|
| 404 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 405 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 406 |
+
)
|
| 407 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 408 |
+
timesteps = scheduler.timesteps
|
| 409 |
+
num_inference_steps = len(timesteps)
|
| 410 |
+
else:
|
| 411 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 412 |
+
timesteps = scheduler.timesteps
|
| 413 |
+
return timesteps, num_inference_steps
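`retrieve_timesteps` either lets the scheduler space the steps itself or forwards an explicit `timesteps` list when the scheduler supports it. A hedged usage sketch (the scheduler source below is illustrative):

from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")

# Regular path: the scheduler computes a 30-step schedule on its own.
timesteps, num_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")
print(num_steps, timesteps[:3])

# Custom path (assumption): only valid for schedulers whose `set_timesteps`
# accepts a `timesteps` argument; otherwise a ValueError is raised above.
# timesteps, num_steps = retrieve_timesteps(scheduler, device="cpu", timesteps=[980, 700, 460, 220, 40])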
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
class IPAdapterFaceIDStableDiffusionPipeline(
|
| 417 |
+
DiffusionPipeline,
|
| 418 |
+
StableDiffusionMixin,
|
| 419 |
+
TextualInversionLoaderMixin,
|
| 420 |
+
LoraLoaderMixin,
|
| 421 |
+
IPAdapterMixin,
|
| 422 |
+
FromSingleFileMixin,
|
| 423 |
+
):
|
| 424 |
+
r"""
|
| 425 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 426 |
+
|
| 427 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 428 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 429 |
+
|
| 430 |
+
The pipeline also inherits the following loading methods:
|
| 431 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 432 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 433 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 434 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 435 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 436 |
+
|
| 437 |
+
Args:
|
| 438 |
+
vae ([`AutoencoderKL`]):
|
| 439 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 440 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 441 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 442 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 443 |
+
A `CLIPTokenizer` to tokenize text.
|
| 444 |
+
unet ([`UNet2DConditionModel`]):
|
| 445 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 446 |
+
scheduler ([`SchedulerMixin`]):
|
| 447 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 448 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 449 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 450 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 451 |
+
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
| 452 |
+
about a model's potential harms.
|
| 453 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 454 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 455 |
+
"""
|
| 456 |
+
|
| 457 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 458 |
+
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
|
| 459 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 460 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 461 |
+
|
| 462 |
+
def __init__(
|
| 463 |
+
self,
|
| 464 |
+
vae: AutoencoderKL,
|
| 465 |
+
text_encoder: CLIPTextModel,
|
| 466 |
+
tokenizer: CLIPTokenizer,
|
| 467 |
+
unet: UNet2DConditionModel,
|
| 468 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 469 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 470 |
+
feature_extractor: CLIPImageProcessor,
|
| 471 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 472 |
+
requires_safety_checker: bool = True,
|
| 473 |
+
):
|
| 474 |
+
super().__init__()
|
| 475 |
+
|
| 476 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 477 |
+
deprecation_message = (
|
| 478 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 479 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 480 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 481 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 482 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 483 |
+
" file"
|
| 484 |
+
)
|
| 485 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 486 |
+
new_config = dict(scheduler.config)
|
| 487 |
+
new_config["steps_offset"] = 1
|
| 488 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 489 |
+
|
| 490 |
+
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
| 491 |
+
deprecation_message = (
|
| 492 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 493 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 494 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 495 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 496 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 497 |
+
)
|
| 498 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 499 |
+
new_config = dict(scheduler.config)
|
| 500 |
+
new_config["clip_sample"] = False
|
| 501 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 502 |
+
|
| 503 |
+
if safety_checker is None and requires_safety_checker:
|
| 504 |
+
logger.warning(
|
| 505 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 506 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 507 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 508 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 509 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 510 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 511 |
+
)
|
| 512 |
+
|
| 513 |
+
if safety_checker is not None and feature_extractor is None:
|
| 514 |
+
raise ValueError(
|
| 515 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 516 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 517 |
+
)
|
| 518 |
+
|
| 519 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 520 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 521 |
+
) < version.parse("0.9.0.dev0")
|
| 522 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 523 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 524 |
+
deprecation_message = (
|
| 525 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 526 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 527 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 528 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 529 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 530 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 531 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 532 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 533 |
+
" the `unet/config.json` file"
|
| 534 |
+
)
|
| 535 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 536 |
+
new_config = dict(unet.config)
|
| 537 |
+
new_config["sample_size"] = 64
|
| 538 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 539 |
+
|
| 540 |
+
self.register_modules(
|
| 541 |
+
vae=vae,
|
| 542 |
+
text_encoder=text_encoder,
|
| 543 |
+
tokenizer=tokenizer,
|
| 544 |
+
unet=unet,
|
| 545 |
+
scheduler=scheduler,
|
| 546 |
+
safety_checker=safety_checker,
|
| 547 |
+
feature_extractor=feature_extractor,
|
| 548 |
+
image_encoder=image_encoder,
|
| 549 |
+
)
|
| 550 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 551 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 552 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 553 |
+
|
| 554 |
+
def load_ip_adapter_face_id(self, pretrained_model_name_or_path_or_dict, weight_name, **kwargs):
|
| 555 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
| 556 |
+
force_download = kwargs.pop("force_download", False)
|
| 557 |
+
resume_download = kwargs.pop("resume_download", False)
|
| 558 |
+
proxies = kwargs.pop("proxies", None)
|
| 559 |
+
local_files_only = kwargs.pop("local_files_only", None)
|
| 560 |
+
token = kwargs.pop("token", None)
|
| 561 |
+
revision = kwargs.pop("revision", None)
|
| 562 |
+
subfolder = kwargs.pop("subfolder", None)
|
| 563 |
+
|
| 564 |
+
user_agent = {
|
| 565 |
+
"file_type": "attn_procs_weights",
|
| 566 |
+
"framework": "pytorch",
|
| 567 |
+
}
|
| 568 |
+
model_file = _get_model_file(
|
| 569 |
+
pretrained_model_name_or_path_or_dict,
|
| 570 |
+
weights_name=weight_name,
|
| 571 |
+
cache_dir=cache_dir,
|
| 572 |
+
force_download=force_download,
|
| 573 |
+
resume_download=resume_download,
|
| 574 |
+
proxies=proxies,
|
| 575 |
+
local_files_only=local_files_only,
|
| 576 |
+
token=token,
|
| 577 |
+
revision=revision,
|
| 578 |
+
subfolder=subfolder,
|
| 579 |
+
user_agent=user_agent,
|
| 580 |
+
)
|
| 581 |
+
if weight_name.endswith(".safetensors"):
|
| 582 |
+
state_dict = {"image_proj": {}, "ip_adapter": {}}
|
| 583 |
+
with safe_open(model_file, framework="pt", device="cpu") as f:
|
| 584 |
+
for key in f.keys():
|
| 585 |
+
if key.startswith("image_proj."):
|
| 586 |
+
state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
|
| 587 |
+
elif key.startswith("ip_adapter."):
|
| 588 |
+
state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
|
| 589 |
+
else:
|
| 590 |
+
state_dict = torch.load(model_file, map_location="cpu")
|
| 591 |
+
self._load_ip_adapter_weights(state_dict)
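A hedged usage sketch for the FaceID loader above (the repository and file name stand for any checkpoint that stores `image_proj.*` and `ip_adapter.*` keys in the layout expected here):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",          # illustrative base checkpoint
    custom_pipeline="ip_adapter_face_id",      # this file
    torch_dtype=torch.float16,
).to("cuda")

# Illustrative FaceID checkpoint location and weight name.
pipe.load_ip_adapter_face_id("h94/IP-Adapter-FaceID", "ip-adapter-faceid_sd15.bin")
pipe.set_ip_adapter_scale(0.7)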
|
| 592 |
+
|
| 593 |
+
def convert_ip_adapter_image_proj_to_diffusers(self, state_dict):
|
| 594 |
+
updated_state_dict = {}
|
| 595 |
+
clip_embeddings_dim_in = state_dict["proj.0.weight"].shape[1]
|
| 596 |
+
clip_embeddings_dim_out = state_dict["proj.0.weight"].shape[0]
|
| 597 |
+
multiplier = clip_embeddings_dim_out // clip_embeddings_dim_in
|
| 598 |
+
norm_layer = "norm.weight"
|
| 599 |
+
cross_attention_dim = state_dict[norm_layer].shape[0]
|
| 600 |
+
num_tokens = state_dict["proj.2.weight"].shape[0] // cross_attention_dim
|
| 601 |
+
|
| 602 |
+
image_projection = IPAdapterFullImageProjection(
|
| 603 |
+
cross_attention_dim=cross_attention_dim,
|
| 604 |
+
image_embed_dim=clip_embeddings_dim_in,
|
| 605 |
+
mult=multiplier,
|
| 606 |
+
num_tokens=num_tokens,
|
| 607 |
+
)
|
| 608 |
+
|
| 609 |
+
for key, value in state_dict.items():
|
| 610 |
+
diffusers_name = key.replace("proj.0", "ff.net.0.proj")
|
| 611 |
+
diffusers_name = diffusers_name.replace("proj.2", "ff.net.2")
|
| 612 |
+
updated_state_dict[diffusers_name] = value
|
| 613 |
+
|
| 614 |
+
image_projection.load_state_dict(updated_state_dict)
|
| 615 |
+
return image_projection
|
| 616 |
+
|
| 617 |
+
def _load_ip_adapter_weights(self, state_dict):
|
| 618 |
+
from diffusers.models.attention_processor import (
|
| 619 |
+
AttnProcessor,
|
| 620 |
+
AttnProcessor2_0,
|
| 621 |
+
)
|
| 622 |
+
|
| 623 |
+
num_image_text_embeds = 4
|
| 624 |
+
|
| 625 |
+
self.unet.encoder_hid_proj = None
|
| 626 |
+
|
| 627 |
+
# set ip-adapter cross-attention processors & load state_dict
|
| 628 |
+
attn_procs = {}
|
| 629 |
+
key_id = 0
|
| 630 |
+
for name in self.unet.attn_processors.keys():
|
| 631 |
+
cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
|
| 632 |
+
if name.startswith("mid_block"):
|
| 633 |
+
hidden_size = self.unet.config.block_out_channels[-1]
|
| 634 |
+
elif name.startswith("up_blocks"):
|
| 635 |
+
block_id = int(name[len("up_blocks.")])
|
| 636 |
+
hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
|
| 637 |
+
elif name.startswith("down_blocks"):
|
| 638 |
+
block_id = int(name[len("down_blocks.")])
|
| 639 |
+
hidden_size = self.unet.config.block_out_channels[block_id]
|
| 640 |
+
if cross_attention_dim is None or "motion_modules" in name:
|
| 641 |
+
attn_processor_class = (
|
| 642 |
+
AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
|
| 643 |
+
)
|
| 644 |
+
attn_procs[name] = attn_processor_class()
|
| 645 |
+
rank = state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"].shape[0]
|
| 646 |
+
attn_module = self.unet
|
| 647 |
+
for n in name.split(".")[:-1]:
|
| 648 |
+
attn_module = getattr(attn_module, n)
|
| 649 |
+
# Set the `lora_layer` attribute of the attention-related matrices.
|
| 650 |
+
attn_module.to_q.set_lora_layer(
|
| 651 |
+
LoRALinearLayer(
|
| 652 |
+
in_features=attn_module.to_q.in_features,
|
| 653 |
+
out_features=attn_module.to_q.out_features,
|
| 654 |
+
rank=rank,
|
| 655 |
+
)
|
| 656 |
+
)
|
| 657 |
+
attn_module.to_k.set_lora_layer(
|
| 658 |
+
LoRALinearLayer(
|
| 659 |
+
in_features=attn_module.to_k.in_features,
|
| 660 |
+
out_features=attn_module.to_k.out_features,
|
| 661 |
+
rank=rank,
|
| 662 |
+
)
|
| 663 |
+
)
|
| 664 |
+
attn_module.to_v.set_lora_layer(
|
| 665 |
+
LoRALinearLayer(
|
| 666 |
+
in_features=attn_module.to_v.in_features,
|
| 667 |
+
out_features=attn_module.to_v.out_features,
|
| 668 |
+
rank=rank,
|
| 669 |
+
)
|
| 670 |
+
)
|
| 671 |
+
attn_module.to_out[0].set_lora_layer(
|
| 672 |
+
LoRALinearLayer(
|
| 673 |
+
in_features=attn_module.to_out[0].in_features,
|
| 674 |
+
out_features=attn_module.to_out[0].out_features,
|
| 675 |
+
rank=rank,
|
| 676 |
+
)
|
| 677 |
+
)
|
| 678 |
+
|
| 679 |
+
value_dict = {}
|
| 680 |
+
for k, module in attn_module.named_children():
|
| 681 |
+
index = "."
|
| 682 |
+
if not hasattr(module, "set_lora_layer"):
|
| 683 |
+
index = ".0."
|
| 684 |
+
module = module[0]
|
| 685 |
+
lora_layer = getattr(module, "lora_layer")
|
| 686 |
+
for lora_name, w in lora_layer.state_dict().items():
|
| 687 |
+
value_dict.update(
|
| 688 |
+
{
|
| 689 |
+
f"{k}{index}lora_layer.{lora_name}": state_dict["ip_adapter"][
|
| 690 |
+
f"{key_id}.{k}_lora.{lora_name}"
|
| 691 |
+
]
|
| 692 |
+
}
|
| 693 |
+
)
|
| 694 |
+
|
| 695 |
+
attn_module.load_state_dict(value_dict, strict=False)
|
| 696 |
+
attn_module.to(dtype=self.dtype, device=self.device)
|
| 697 |
+
key_id += 1
|
| 698 |
+
else:
|
| 699 |
+
rank = state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"].shape[0]
|
| 700 |
+
attn_processor_class = (
|
| 701 |
+
LoRAIPAdapterAttnProcessor2_0
|
| 702 |
+
if hasattr(F, "scaled_dot_product_attention")
|
| 703 |
+
else LoRAIPAdapterAttnProcessor
|
| 704 |
+
)
|
| 705 |
+
attn_procs[name] = attn_processor_class(
|
| 706 |
+
hidden_size=hidden_size,
|
| 707 |
+
cross_attention_dim=cross_attention_dim,
|
| 708 |
+
scale=1.0,
|
| 709 |
+
rank=rank,
|
| 710 |
+
num_tokens=num_image_text_embeds,
|
| 711 |
+
).to(dtype=self.dtype, device=self.device)
|
| 712 |
+
|
| 713 |
+
value_dict = {}
|
| 714 |
+
for k, w in attn_procs[name].state_dict().items():
|
| 715 |
+
value_dict.update({f"{k}": state_dict["ip_adapter"][f"{key_id}.{k}"]})
|
| 716 |
+
|
| 717 |
+
attn_procs[name].load_state_dict(value_dict)
|
| 718 |
+
key_id += 1
|
| 719 |
+
|
| 720 |
+
self.unet.set_attn_processor(attn_procs)
|
| 721 |
+
|
| 722 |
+
# convert IP-Adapter Image Projection layers to diffusers
|
| 723 |
+
image_projection = self.convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"])
|
| 724 |
+
|
| 725 |
+
self.unet.encoder_hid_proj = image_projection.to(device=self.device, dtype=self.dtype)
|
| 726 |
+
self.unet.config.encoder_hid_dim_type = "ip_image_proj"

    def set_ip_adapter_scale(self, scale):
        unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
        for attn_processor in unet.attn_processors.values():
            if isinstance(attn_processor, (LoRAIPAdapterAttnProcessor, LoRAIPAdapterAttnProcessor2_0)):
                attn_processor.scale = scale

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        **kwargs,
    ):
        deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
        deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)

        prompt_embeds_tuple = self.encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            **kwargs,
        )

        # concatenate for backwards comp
        prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])

        return prompt_embeds

| 766 |
+
def encode_prompt(
|
| 767 |
+
self,
|
| 768 |
+
prompt,
|
| 769 |
+
device,
|
| 770 |
+
num_images_per_prompt,
|
| 771 |
+
do_classifier_free_guidance,
|
| 772 |
+
negative_prompt=None,
|
| 773 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 774 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 775 |
+
lora_scale: Optional[float] = None,
|
| 776 |
+
clip_skip: Optional[int] = None,
|
| 777 |
+
):
|
| 778 |
+
r"""
|
| 779 |
+
Encodes the prompt into text encoder hidden states.
|
| 780 |
+
|
| 781 |
+
Args:
|
| 782 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 783 |
+
prompt to be encoded
|
| 784 |
+
device: (`torch.device`):
|
| 785 |
+
torch device
|
| 786 |
+
num_images_per_prompt (`int`):
|
| 787 |
+
number of images that should be generated per prompt
|
| 788 |
+
do_classifier_free_guidance (`bool`):
|
| 789 |
+
whether to use classifier free guidance or not
|
| 790 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 791 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 792 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 793 |
+
less than `1`).
|
| 794 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 795 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 796 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 797 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 798 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 799 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 800 |
+
argument.
|
| 801 |
+
lora_scale (`float`, *optional*):
|
| 802 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 803 |
+
clip_skip (`int`, *optional*):
|
| 804 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 805 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 806 |
+
"""
|
| 807 |
+
# set lora scale so that monkey patched LoRA
|
| 808 |
+
# function of text encoder can correctly access it
|
| 809 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 810 |
+
self._lora_scale = lora_scale
|
| 811 |
+
|
| 812 |
+
# dynamically adjust the LoRA scale
|
| 813 |
+
if not USE_PEFT_BACKEND:
|
| 814 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 815 |
+
else:
|
| 816 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 817 |
+
|
| 818 |
+
if prompt is not None and isinstance(prompt, str):
|
| 819 |
+
batch_size = 1
|
| 820 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 821 |
+
batch_size = len(prompt)
|
| 822 |
+
else:
|
| 823 |
+
batch_size = prompt_embeds.shape[0]
|
| 824 |
+
|
| 825 |
+
if prompt_embeds is None:
|
| 826 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 827 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 828 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 829 |
+
|
| 830 |
+
text_inputs = self.tokenizer(
|
| 831 |
+
prompt,
|
| 832 |
+
padding="max_length",
|
| 833 |
+
max_length=self.tokenizer.model_max_length,
|
| 834 |
+
truncation=True,
|
| 835 |
+
return_tensors="pt",
|
| 836 |
+
)
|
| 837 |
+
text_input_ids = text_inputs.input_ids
|
| 838 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 839 |
+
|
| 840 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 841 |
+
text_input_ids, untruncated_ids
|
| 842 |
+
):
|
| 843 |
+
removed_text = self.tokenizer.batch_decode(
|
| 844 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 845 |
+
)
|
| 846 |
+
logger.warning(
|
| 847 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 848 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 849 |
+
)
|
| 850 |
+
|
| 851 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 852 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 853 |
+
else:
|
| 854 |
+
attention_mask = None
|
| 855 |
+
|
| 856 |
+
if clip_skip is None:
|
| 857 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 858 |
+
prompt_embeds = prompt_embeds[0]
|
| 859 |
+
else:
|
| 860 |
+
prompt_embeds = self.text_encoder(
|
| 861 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 862 |
+
)
|
| 863 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 864 |
+
# all the hidden states from the encoder layers. Then index into
|
| 865 |
+
# the tuple to access the hidden states from the desired layer.
|
| 866 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 867 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 868 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 869 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 870 |
+
# layer.
|
| 871 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 872 |
+
|
| 873 |
+
if self.text_encoder is not None:
|
| 874 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 875 |
+
elif self.unet is not None:
|
| 876 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 877 |
+
else:
|
| 878 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 879 |
+
|
| 880 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 881 |
+
|
| 882 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 883 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 884 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 885 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 886 |
+
|
| 887 |
+
# get unconditional embeddings for classifier free guidance
|
| 888 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 889 |
+
uncond_tokens: List[str]
|
| 890 |
+
if negative_prompt is None:
|
| 891 |
+
uncond_tokens = [""] * batch_size
|
| 892 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 893 |
+
raise TypeError(
|
| 894 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 895 |
+
f" {type(prompt)}."
|
| 896 |
+
)
|
| 897 |
+
elif isinstance(negative_prompt, str):
|
| 898 |
+
uncond_tokens = [negative_prompt]
|
| 899 |
+
elif batch_size != len(negative_prompt):
|
| 900 |
+
raise ValueError(
|
| 901 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 902 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 903 |
+
" the batch size of `prompt`."
|
| 904 |
+
)
|
| 905 |
+
else:
|
| 906 |
+
uncond_tokens = negative_prompt
|
| 907 |
+
|
| 908 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 909 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 910 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 911 |
+
|
| 912 |
+
max_length = prompt_embeds.shape[1]
|
| 913 |
+
uncond_input = self.tokenizer(
|
| 914 |
+
uncond_tokens,
|
| 915 |
+
padding="max_length",
|
| 916 |
+
max_length=max_length,
|
| 917 |
+
truncation=True,
|
| 918 |
+
return_tensors="pt",
|
| 919 |
+
)
|
| 920 |
+
|
| 921 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 922 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 923 |
+
else:
|
| 924 |
+
attention_mask = None
|
| 925 |
+
|
| 926 |
+
negative_prompt_embeds = self.text_encoder(
|
| 927 |
+
uncond_input.input_ids.to(device),
|
| 928 |
+
attention_mask=attention_mask,
|
| 929 |
+
)
|
| 930 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 931 |
+
|
| 932 |
+
if do_classifier_free_guidance:
|
| 933 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 934 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 935 |
+
|
| 936 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 937 |
+
|
| 938 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 939 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 940 |
+
|
| 941 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 942 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 943 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 944 |
+
|
| 945 |
+
return prompt_embeds, negative_prompt_embeds

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

| 989 |
+
def check_inputs(
|
| 990 |
+
self,
|
| 991 |
+
prompt,
|
| 992 |
+
height,
|
| 993 |
+
width,
|
| 994 |
+
callback_steps,
|
| 995 |
+
negative_prompt=None,
|
| 996 |
+
prompt_embeds=None,
|
| 997 |
+
negative_prompt_embeds=None,
|
| 998 |
+
callback_on_step_end_tensor_inputs=None,
|
| 999 |
+
):
|
| 1000 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 1001 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 1002 |
+
|
| 1003 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 1004 |
+
raise ValueError(
|
| 1005 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 1006 |
+
f" {type(callback_steps)}."
|
| 1007 |
+
)
|
| 1008 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 1009 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 1010 |
+
):
|
| 1011 |
+
raise ValueError(
|
| 1012 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 1013 |
+
)
|
| 1014 |
+
|
| 1015 |
+
if prompt is not None and prompt_embeds is not None:
|
| 1016 |
+
raise ValueError(
|
| 1017 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 1018 |
+
" only forward one of the two."
|
| 1019 |
+
)
|
| 1020 |
+
elif prompt is None and prompt_embeds is None:
|
| 1021 |
+
raise ValueError(
|
| 1022 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 1023 |
+
)
|
| 1024 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 1025 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 1026 |
+
|
| 1027 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 1028 |
+
raise ValueError(
|
| 1029 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 1030 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 1031 |
+
)
|
| 1032 |
+
|
| 1033 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 1034 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 1035 |
+
raise ValueError(
|
| 1036 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 1037 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 1038 |
+
f" {negative_prompt_embeds.shape}."
|
| 1039 |
+
)
|
| 1040 |
+
|
| 1041 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 1042 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 1043 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 1044 |
+
raise ValueError(
|
| 1045 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 1046 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 1047 |
+
)
|
| 1048 |
+
|
| 1049 |
+
if latents is None:
|
| 1050 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 1051 |
+
else:
|
| 1052 |
+
latents = latents.to(device)
|
| 1053 |
+
|
| 1054 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 1055 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 1056 |
+
return latents

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            timesteps (`torch.Tensor`):
                generate embedding vectors at these timesteps
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

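    # Descriptive note: when the UNet exposes `time_cond_proj_dim` (an LCM-style distilled model),
    # the `do_classifier_free_guidance` property above returns False and the guidance weight is
    # injected through `get_guidance_scale_embedding` instead: `w = guidance_scale - 1` is scaled
    # by 1000 and embedded with sin/cos at log-spaced frequencies, like a timestep embedding, then
    # passed to the UNet as `timestep_cond` in `__call__` below.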
| 1118 |
+
@torch.no_grad()
|
| 1119 |
+
def __call__(
|
| 1120 |
+
self,
|
| 1121 |
+
prompt: Union[str, List[str]] = None,
|
| 1122 |
+
height: Optional[int] = None,
|
| 1123 |
+
width: Optional[int] = None,
|
| 1124 |
+
num_inference_steps: int = 50,
|
| 1125 |
+
timesteps: List[int] = None,
|
| 1126 |
+
guidance_scale: float = 7.5,
|
| 1127 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1128 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1129 |
+
eta: float = 0.0,
|
| 1130 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1131 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 1132 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1133 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1134 |
+
image_embeds: Optional[torch.FloatTensor] = None,
|
| 1135 |
+
output_type: Optional[str] = "pil",
|
| 1136 |
+
return_dict: bool = True,
|
| 1137 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1138 |
+
guidance_rescale: float = 0.0,
|
| 1139 |
+
clip_skip: Optional[int] = None,
|
| 1140 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 1141 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 1142 |
+
**kwargs,
|
| 1143 |
+
):
|
| 1144 |
+
r"""
|
| 1145 |
+
The call function to the pipeline for generation.
|
| 1146 |
+
|
| 1147 |
+
Args:
|
| 1148 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 1149 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 1150 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 1151 |
+
The height in pixels of the generated image.
|
| 1152 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 1153 |
+
The width in pixels of the generated image.
|
| 1154 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1155 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1156 |
+
expense of slower inference.
|
| 1157 |
+
timesteps (`List[int]`, *optional*):
|
| 1158 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 1159 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 1160 |
+
passed will be used. Must be in descending order.
|
| 1161 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1162 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 1163 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 1164 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1165 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 1166 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 1167 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1168 |
+
The number of images to generate per prompt.
|
| 1169 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1170 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 1171 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 1172 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1173 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 1174 |
+
generation deterministic.
|
| 1175 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 1176 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 1177 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 1178 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 1179 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1180 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 1181 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 1182 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1183 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 1184 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 1185 |
+
image_embeds (`torch.FloatTensor`, *optional*):
|
| 1186 |
+
Pre-generated image embeddings.
|
| 1187 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1188 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 1189 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1190 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1191 |
+
plain tuple.
|
| 1192 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1193 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 1194 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1195 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 1196 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 1197 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 1198 |
+
using zero terminal SNR.
|
| 1199 |
+
clip_skip (`int`, *optional*):
|
| 1200 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 1201 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 1202 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 1203 |
+
A function that calls at the end of each denoising steps during the inference. The function is called
|
| 1204 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 1205 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 1206 |
+
`callback_on_step_end_tensor_inputs`.
|
| 1207 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 1208 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 1209 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 1210 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 1211 |
+
|
| 1212 |
+
Examples:
|
| 1213 |
+
|
| 1214 |
+
Returns:
|
| 1215 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1216 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 1217 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 1218 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 1219 |
+
"not-safe-for-work" (nsfw) content.
|
| 1220 |
+
"""
|
| 1221 |
+
|
| 1222 |
+
callback = kwargs.pop("callback", None)
|
| 1223 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 1224 |
+
|
| 1225 |
+
if callback is not None:
|
| 1226 |
+
deprecate(
|
| 1227 |
+
"callback",
|
| 1228 |
+
"1.0.0",
|
| 1229 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 1230 |
+
)
|
| 1231 |
+
if callback_steps is not None:
|
| 1232 |
+
deprecate(
|
| 1233 |
+
"callback_steps",
|
| 1234 |
+
"1.0.0",
|
| 1235 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 1236 |
+
)
|
| 1237 |
+
|
| 1238 |
+
# 0. Default height and width to unet
|
| 1239 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 1240 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 1241 |
+
# to deal with lora scaling and other possible forward hooks
|
| 1242 |
+
|
| 1243 |
+
# 1. Check inputs. Raise error if not correct
|
| 1244 |
+
self.check_inputs(
|
| 1245 |
+
prompt,
|
| 1246 |
+
height,
|
| 1247 |
+
width,
|
| 1248 |
+
callback_steps,
|
| 1249 |
+
negative_prompt,
|
| 1250 |
+
prompt_embeds,
|
| 1251 |
+
negative_prompt_embeds,
|
| 1252 |
+
callback_on_step_end_tensor_inputs,
|
| 1253 |
+
)
|
| 1254 |
+
|
| 1255 |
+
self._guidance_scale = guidance_scale
|
| 1256 |
+
self._guidance_rescale = guidance_rescale
|
| 1257 |
+
self._clip_skip = clip_skip
|
| 1258 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 1259 |
+
self._interrupt = False
|
| 1260 |
+
|
| 1261 |
+
# 2. Define call parameters
|
| 1262 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1263 |
+
batch_size = 1
|
| 1264 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1265 |
+
batch_size = len(prompt)
|
| 1266 |
+
else:
|
| 1267 |
+
batch_size = prompt_embeds.shape[0]
|
| 1268 |
+
|
| 1269 |
+
device = self._execution_device
|
| 1270 |
+
|
| 1271 |
+
# 3. Encode input prompt
|
| 1272 |
+
lora_scale = (
|
| 1273 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 1274 |
+
)
|
| 1275 |
+
|
| 1276 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 1277 |
+
prompt,
|
| 1278 |
+
device,
|
| 1279 |
+
num_images_per_prompt,
|
| 1280 |
+
self.do_classifier_free_guidance,
|
| 1281 |
+
negative_prompt,
|
| 1282 |
+
prompt_embeds=prompt_embeds,
|
| 1283 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1284 |
+
lora_scale=lora_scale,
|
| 1285 |
+
clip_skip=self.clip_skip,
|
| 1286 |
+
)
|
| 1287 |
+
|
| 1288 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 1289 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 1290 |
+
# to avoid doing two forward passes
|
| 1291 |
+
if self.do_classifier_free_guidance:
|
| 1292 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 1293 |
+
|
| 1294 |
+
if image_embeds is not None:
|
| 1295 |
+
image_embeds = torch.stack([image_embeds] * num_images_per_prompt, dim=0).to(
|
| 1296 |
+
device=device, dtype=prompt_embeds.dtype
|
| 1297 |
+
)
|
| 1298 |
+
negative_image_embeds = torch.zeros_like(image_embeds)
|
| 1299 |
+
if self.do_classifier_free_guidance:
|
| 1300 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 1301 |
+
|
| 1302 |
+
# 4. Prepare timesteps
|
| 1303 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 1304 |
+
|
| 1305 |
+
# 5. Prepare latent variables
|
| 1306 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1307 |
+
latents = self.prepare_latents(
|
| 1308 |
+
batch_size * num_images_per_prompt,
|
| 1309 |
+
num_channels_latents,
|
| 1310 |
+
height,
|
| 1311 |
+
width,
|
| 1312 |
+
prompt_embeds.dtype,
|
| 1313 |
+
device,
|
| 1314 |
+
generator,
|
| 1315 |
+
latents,
|
| 1316 |
+
)
|
| 1317 |
+
|
| 1318 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1319 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1320 |
+
|
| 1321 |
+
# 6.1 Add image embeds for IP-Adapter
|
| 1322 |
+
added_cond_kwargs = {"image_embeds": image_embeds} if image_embeds is not None else None
|
| 1323 |
+
|
| 1324 |
+
# 6.2 Optionally get Guidance Scale Embedding
|
| 1325 |
+
timestep_cond = None
|
| 1326 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 1327 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 1328 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 1329 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 1330 |
+
).to(device=device, dtype=latents.dtype)
|
| 1331 |
+
|
| 1332 |
+
# 7. Denoising loop
|
| 1333 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1334 |
+
self._num_timesteps = len(timesteps)
|
| 1335 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1336 |
+
for i, t in enumerate(timesteps):
|
| 1337 |
+
if self.interrupt:
|
| 1338 |
+
continue
|
| 1339 |
+
|
| 1340 |
+
# expand the latents if we are doing classifier free guidance
|
| 1341 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1342 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1343 |
+
|
| 1344 |
+
# predict the noise residual
|
| 1345 |
+
noise_pred = self.unet(
|
| 1346 |
+
latent_model_input,
|
| 1347 |
+
t,
|
| 1348 |
+
encoder_hidden_states=prompt_embeds,
|
| 1349 |
+
timestep_cond=timestep_cond,
|
| 1350 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1351 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1352 |
+
return_dict=False,
|
| 1353 |
+
)[0]
|
| 1354 |
+
|
| 1355 |
+
# perform guidance
|
| 1356 |
+
if self.do_classifier_free_guidance:
|
| 1357 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1358 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1359 |
+
|
| 1360 |
+
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
|
| 1361 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1362 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
|
| 1363 |
+
|
| 1364 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1365 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1366 |
+
|
| 1367 |
+
if callback_on_step_end is not None:
|
| 1368 |
+
callback_kwargs = {}
|
| 1369 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1370 |
+
callback_kwargs[k] = locals()[k]
|
| 1371 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1372 |
+
|
| 1373 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1374 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1375 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1376 |
+
|
| 1377 |
+
# call the callback, if provided
|
| 1378 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1379 |
+
progress_bar.update()
|
| 1380 |
+
if callback is not None and i % callback_steps == 0:
|
| 1381 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1382 |
+
callback(step_idx, t, latents)
|
| 1383 |
+
|
| 1384 |
+
if not output_type == "latent":
|
| 1385 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
|
| 1386 |
+
0
|
| 1387 |
+
]
|
| 1388 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1389 |
+
else:
|
| 1390 |
+
image = latents
|
| 1391 |
+
has_nsfw_concept = None
|
| 1392 |
+
|
| 1393 |
+
if has_nsfw_concept is None:
|
| 1394 |
+
do_denormalize = [True] * image.shape[0]
|
| 1395 |
+
else:
|
| 1396 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 1397 |
+
|
| 1398 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 1399 |
+
|
| 1400 |
+
# Offload all models
|
| 1401 |
+
self.maybe_free_model_hooks()
|
| 1402 |
+
|
| 1403 |
+
if not return_dict:
|
| 1404 |
+
return (image, has_nsfw_concept)
|
| 1405 |
+
|
| 1406 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
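Taken together, `__call__` above follows the standard Stable Diffusion text-to-image flow, with the face identity injected through the optional `image_embeds` argument and the adapter strength controlled by `set_ip_adapter_scale`. A minimal usage sketch follows; it assumes the community file is loaded via `custom_pipeline`, that the IP-Adapter FaceID weights have already been loaded into the UNet with the loader defined in this file, and the checkpoint name and `face_embeds` tensor are purely illustrative:

import torch
from diffusers import DiffusionPipeline

# Load the community pipeline defined in this file (pipeline name assumed to resolve to this module).
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="ip_adapter_face_id",
    torch_dtype=torch.float16,
).to("cuda")

# Hypothetical face-identity embedding (e.g. produced by a face-recognition model); its width
# must match the input dimension of the projection converted by this pipeline's loader.
face_embeds = torch.randn(1, 512, dtype=torch.float16)

pipe.set_ip_adapter_scale(0.7)  # blend between text conditioning and the identity embedding
result = pipe(
    prompt="a photo of a person in a garden",
    image_embeds=face_embeds,
    num_inference_steps=30,
    guidance_scale=7.5,
)
result.images[0].save("face_id.png")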
v0.27.0/latent_consistency_img2img.py
ADDED
|
@@ -0,0 +1,825 @@
# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
from diffusers.configuration_utils import register_to_config
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import BaseOutput
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):
    _optional_components = ["scheduler"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: "LCMSchedulerWithTimestamp",
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        scheduler = (
            scheduler
            if scheduler is not None
            else LCMSchedulerWithTimestamp(
                beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
            )
        )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        prompt_embeds: None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
        """

        if prompt is not None and isinstance(prompt, str):
            pass
        elif prompt is not None and isinstance(prompt, list):
            len(prompt)
        else:
            prompt_embeds.shape[0]

        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # Don't need to get uncond prompt embedding because of LCM Guided Distillation
        return prompt_embeds

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def prepare_latents(
        self,
        image,
        timestep,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        latents=None,
        generator=None,
    ):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)

        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        # batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image

        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            elif isinstance(generator, list):
                init_latents = [
                    self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.vae.encode(image).latent_dist.sample(generator)

            init_latents = self.vae.config.scaling_factor * init_latents

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            # expand init_latents for batch_size
            (
                f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
                " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
                " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
                " your script to pass as many initial images as text prompts to suppress this warning."
            )
            # deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

        if latents is None:
            latents = torch.randn(shape, dtype=dtype).to(device)
        else:
            latents = latents.to(device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

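    # Descriptive note on the img2img latent preparation above: the input image is encoded with the
    # VAE, scaled by `vae.config.scaling_factor`, duplicated if the prompt batch is larger, and then
    # noised with `scheduler.add_noise` at the first inference timestep, so `strength` decides how
    # much of the original image is preserved. The block after the early `return latents` is
    # unreachable leftover code from a text-to-image style `prepare_latents`.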
    def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
        Args:
            timesteps: torch.Tensor: generate embedding vectors at these timesteps
            embedding_dim: int: dimension of the embeddings to generate
            dtype: data type of the generated embeddings
        Returns:
            embedding vectors with shape `(len(timesteps), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

        return timesteps, num_inference_steps - t_start

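    # Descriptive note: the sampling loop in `__call__` below does not run classifier-free guidance;
    # the guidance weight is embedded with `get_w_embedding` (256-dim here, assumed to match the
    # distilled LCM UNet's `time_cond_proj_dim`) and passed to the UNet as `timestep_cond`.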
| 278 |
+
@torch.no_grad()
|
| 279 |
+
def __call__(
|
| 280 |
+
self,
|
| 281 |
+
prompt: Union[str, List[str]] = None,
|
| 282 |
+
image: PipelineImageInput = None,
|
| 283 |
+
strength: float = 0.8,
|
| 284 |
+
height: Optional[int] = 768,
|
| 285 |
+
width: Optional[int] = 768,
|
| 286 |
+
guidance_scale: float = 7.5,
|
| 287 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 288 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 289 |
+
num_inference_steps: int = 4,
|
| 290 |
+
lcm_origin_steps: int = 50,
|
| 291 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 292 |
+
output_type: Optional[str] = "pil",
|
| 293 |
+
return_dict: bool = True,
|
| 294 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 295 |
+
):
|
| 296 |
+
# 0. Default height and width to unet
|
| 297 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 298 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 299 |
+
|
| 300 |
+
# 2. Define call parameters
|
| 301 |
+
if prompt is not None and isinstance(prompt, str):
|
| 302 |
+
batch_size = 1
|
| 303 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 304 |
+
batch_size = len(prompt)
|
| 305 |
+
else:
|
| 306 |
+
batch_size = prompt_embeds.shape[0]
|
| 307 |
+
|
| 308 |
+
device = self._execution_device
|
| 309 |
+
# do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
|
| 310 |
+
|
| 311 |
+
# 3. Encode input prompt
|
| 312 |
+
prompt_embeds = self._encode_prompt(
|
| 313 |
+
prompt,
|
| 314 |
+
device,
|
| 315 |
+
num_images_per_prompt,
|
| 316 |
+
prompt_embeds=prompt_embeds,
|
| 317 |
+
)
|
| 318 |
+
|
| 319 |
+
# 3.5 encode image
|
| 320 |
+
image = self.image_processor.preprocess(image)
|
| 321 |
+
|
| 322 |
+
# 4. Prepare timesteps
|
| 323 |
+
self.scheduler.set_timesteps(strength, num_inference_steps, lcm_origin_steps)
|
| 324 |
+
# timesteps = self.scheduler.timesteps
|
| 325 |
+
# timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)
|
| 326 |
+
timesteps = self.scheduler.timesteps
|
| 327 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 328 |
+
|
| 329 |
+
print("timesteps: ", timesteps)
|
| 330 |
+
|
| 331 |
+
# 5. Prepare latent variable
|
| 332 |
+
num_channels_latents = self.unet.config.in_channels
|
| 333 |
+
latents = self.prepare_latents(
|
| 334 |
+
image,
|
| 335 |
+
latent_timestep,
|
| 336 |
+
batch_size * num_images_per_prompt,
|
| 337 |
+
num_channels_latents,
|
| 338 |
+
height,
|
| 339 |
+
width,
|
| 340 |
+
prompt_embeds.dtype,
|
| 341 |
+
device,
|
| 342 |
+
latents,
|
| 343 |
+
)
|
| 344 |
+
bs = batch_size * num_images_per_prompt
|
| 345 |
+
|
| 346 |
+
# 6. Get Guidance Scale Embedding
|
| 347 |
+
w = torch.tensor(guidance_scale).repeat(bs)
|
| 348 |
+
w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
|
| 349 |
+
|
| 350 |
+
# 7. LCM MultiStep Sampling Loop:
|
| 351 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 352 |
+
for i, t in enumerate(timesteps):
|
| 353 |
+
ts = torch.full((bs,), t, device=device, dtype=torch.long)
|
| 354 |
+
latents = latents.to(prompt_embeds.dtype)
|
| 355 |
+
|
| 356 |
+
# model prediction (v-prediction, eps, x)
|
| 357 |
+
model_pred = self.unet(
|
| 358 |
+
latents,
|
| 359 |
+
ts,
|
| 360 |
+
timestep_cond=w_embedding,
|
| 361 |
+
encoder_hidden_states=prompt_embeds,
|
| 362 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 363 |
+
return_dict=False,
|
| 364 |
+
)[0]
|
| 365 |
+
|
| 366 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 367 |
+
latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
|
| 368 |
+
|
| 369 |
+
# # call the callback, if provided
|
| 370 |
+
# if i == len(timesteps) - 1:
|
| 371 |
+
progress_bar.update()
|
| 372 |
+
|
| 373 |
+
denoised = denoised.to(prompt_embeds.dtype)
|
| 374 |
+
if not output_type == "latent":
|
| 375 |
+
image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 376 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 377 |
+
else:
|
| 378 |
+
image = denoised
|
| 379 |
+
has_nsfw_concept = None
|
| 380 |
+
|
| 381 |
+
if has_nsfw_concept is None:
|
| 382 |
+
do_denormalize = [True] * image.shape[0]
|
| 383 |
+
else:
|
| 384 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 385 |
+
|
| 386 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 387 |
+
|
| 388 |
+
if not return_dict:
|
| 389 |
+
return (image, has_nsfw_concept)
|
| 390 |
+
|
| 391 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
@dataclass
|
| 395 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
|
| 396 |
+
class LCMSchedulerOutput(BaseOutput):
|
| 397 |
+
"""
|
| 398 |
+
Output class for the scheduler's `step` function output.
|
| 399 |
+
Args:
|
| 400 |
+
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 401 |
+
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
|
| 402 |
+
denoising loop.
|
| 403 |
+
denoised (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 404 |
+
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
|
| 405 |
+
`denoised` can be used to preview progress or for guidance.
|
| 406 |
+
"""
|
| 407 |
+
|
| 408 |
+
prev_sample: torch.FloatTensor
|
| 409 |
+
denoised: Optional[torch.FloatTensor] = None
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
|
| 413 |
+
def betas_for_alpha_bar(
|
| 414 |
+
num_diffusion_timesteps,
|
| 415 |
+
max_beta=0.999,
|
| 416 |
+
alpha_transform_type="cosine",
|
| 417 |
+
):
|
| 418 |
+
"""
|
| 419 |
+
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
| 420 |
+
(1-beta) over time from t = [0,1].
|
| 421 |
+
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
| 422 |
+
to that part of the diffusion process.
|
| 423 |
+
Args:
|
| 424 |
+
num_diffusion_timesteps (`int`): the number of betas to produce.
|
| 425 |
+
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
| 426 |
+
prevent singularities.
|
| 427 |
+
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
|
| 428 |
+
Choose from `cosine` or `exp`
|
| 429 |
+
Returns:
|
| 430 |
+
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
| 431 |
+
"""
|
| 432 |
+
if alpha_transform_type == "cosine":
|
| 433 |
+
|
| 434 |
+
def alpha_bar_fn(t):
|
| 435 |
+
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
|
| 436 |
+
|
| 437 |
+
elif alpha_transform_type == "exp":
|
| 438 |
+
|
| 439 |
+
def alpha_bar_fn(t):
|
| 440 |
+
return math.exp(t * -12.0)
|
| 441 |
+
|
| 442 |
+
else:
|
| 443 |
+
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
|
| 444 |
+
|
| 445 |
+
betas = []
|
| 446 |
+
for i in range(num_diffusion_timesteps):
|
| 447 |
+
t1 = i / num_diffusion_timesteps
|
| 448 |
+
t2 = (i + 1) / num_diffusion_timesteps
|
| 449 |
+
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
|
| 450 |
+
return torch.tensor(betas, dtype=torch.float32)
|
| 451 |
+
|
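# Editorial sketch, not part of the original file: a quick sanity check of the cosine
# schedule above. The cumulative product of (1 - beta_i) should track
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, up to the alpha_bar(0)
# normalization and the max_beta clamp applied on the very last step.
def _check_cosine_alpha_bar(num_diffusion_timesteps: int = 1000) -> None:
    betas = betas_for_alpha_bar(num_diffusion_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    t = 0.5
    reference = math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    # both values are ~0.494 for the default 1000-step schedule
    print(alphas_cumprod[num_diffusion_timesteps // 2 - 1].item(), reference)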
| 452 |
+
|
| 453 |
+
def rescale_zero_terminal_snr(betas):
|
| 454 |
+
"""
|
| 455 |
+
Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
|
| 456 |
+
Args:
|
| 457 |
+
betas (`torch.FloatTensor`):
|
| 458 |
+
the betas that the scheduler is being initialized with.
|
| 459 |
+
Returns:
|
| 460 |
+
`torch.FloatTensor`: rescaled betas with zero terminal SNR
|
| 461 |
+
"""
|
| 462 |
+
# Convert betas to alphas_bar_sqrt
|
| 463 |
+
alphas = 1.0 - betas
|
| 464 |
+
alphas_cumprod = torch.cumprod(alphas, dim=0)
|
| 465 |
+
alphas_bar_sqrt = alphas_cumprod.sqrt()
|
| 466 |
+
|
| 467 |
+
# Store old values.
|
| 468 |
+
alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
|
| 469 |
+
alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
|
| 470 |
+
|
| 471 |
+
# Shift so the last timestep is zero.
|
| 472 |
+
alphas_bar_sqrt -= alphas_bar_sqrt_T
|
| 473 |
+
|
| 474 |
+
# Scale so the first timestep is back to the old value.
|
| 475 |
+
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
|
| 476 |
+
|
| 477 |
+
# Convert alphas_bar_sqrt to betas
|
| 478 |
+
alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
|
| 479 |
+
alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
|
| 480 |
+
alphas = torch.cat([alphas_bar[0:1], alphas])
|
| 481 |
+
betas = 1 - alphas
|
| 482 |
+
|
| 483 |
+
return betas
|
| 484 |
+
|
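# Editorial sketch, not part of the original file: verify that the rescaling above
# really yields a zero terminal SNR. Since SNR_t = alpha_bar_t / (1 - alpha_bar_t),
# the last cumulative alpha must be (numerically) zero after rescaling.
def _check_zero_terminal_snr(num_train_timesteps: int = 1000) -> None:
    betas = torch.linspace(0.0001, 0.02, num_train_timesteps)  # the default "linear" schedule
    rescaled = rescale_zero_terminal_snr(betas)
    alphas_cumprod = torch.cumprod(1.0 - rescaled, dim=0)
    print(alphas_cumprod[-1].item())  # 0.0 -> the terminal SNR is zero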
| 485 |
+
|
| 486 |
+
class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin):
|
| 487 |
+
"""
|
| 488 |
+
This class modifies LCMScheduler so that set_timesteps also accepts a strength argument (used by the img2img pipeline above)
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
`LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
|
| 492 |
+
non-Markovian guidance.
|
| 493 |
+
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
|
| 494 |
+
methods the library implements for all schedulers such as loading and saving.
|
| 495 |
+
Args:
|
| 496 |
+
num_train_timesteps (`int`, defaults to 1000):
|
| 497 |
+
The number of diffusion steps to train the model.
|
| 498 |
+
beta_start (`float`, defaults to 0.0001):
|
| 499 |
+
The starting `beta` value of inference.
|
| 500 |
+
beta_end (`float`, defaults to 0.02):
|
| 501 |
+
The final `beta` value.
|
| 502 |
+
beta_schedule (`str`, defaults to `"linear"`):
|
| 503 |
+
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
| 504 |
+
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
| 505 |
+
trained_betas (`np.ndarray`, *optional*):
|
| 506 |
+
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
|
| 507 |
+
clip_sample (`bool`, defaults to `True`):
|
| 508 |
+
Clip the predicted sample for numerical stability.
|
| 509 |
+
clip_sample_range (`float`, defaults to 1.0):
|
| 510 |
+
The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
|
| 511 |
+
set_alpha_to_one (`bool`, defaults to `True`):
|
| 512 |
+
Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
|
| 513 |
+
there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
|
| 514 |
+
otherwise it uses the alpha value at step 0.
|
| 515 |
+
steps_offset (`int`, defaults to 0):
|
| 516 |
+
An offset added to the inference steps, as required by some model families.
|
| 517 |
+
prediction_type (`str`, defaults to `epsilon`, *optional*):
|
| 518 |
+
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
|
| 519 |
+
`sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
|
| 520 |
+
Video](https://imagen.research.google/video/paper.pdf) paper).
|
| 521 |
+
thresholding (`bool`, defaults to `False`):
|
| 522 |
+
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
|
| 523 |
+
as Stable Diffusion.
|
| 524 |
+
dynamic_thresholding_ratio (`float`, defaults to 0.995):
|
| 525 |
+
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
|
| 526 |
+
sample_max_value (`float`, defaults to 1.0):
|
| 527 |
+
The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
|
| 528 |
+
timestep_spacing (`str`, defaults to `"leading"`):
|
| 529 |
+
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
|
| 530 |
+
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
|
| 531 |
+
rescale_betas_zero_snr (`bool`, defaults to `False`):
|
| 532 |
+
Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
|
| 533 |
+
dark samples instead of limiting it to samples with medium brightness. Loosely related to
|
| 534 |
+
[`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
|
| 535 |
+
"""
|
| 536 |
+
|
| 537 |
+
# _compatibles = [e.name for e in KarrasDiffusionSchedulers]
|
| 538 |
+
order = 1
|
| 539 |
+
|
| 540 |
+
@register_to_config
|
| 541 |
+
def __init__(
|
| 542 |
+
self,
|
| 543 |
+
num_train_timesteps: int = 1000,
|
| 544 |
+
beta_start: float = 0.0001,
|
| 545 |
+
beta_end: float = 0.02,
|
| 546 |
+
beta_schedule: str = "linear",
|
| 547 |
+
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
| 548 |
+
clip_sample: bool = True,
|
| 549 |
+
set_alpha_to_one: bool = True,
|
| 550 |
+
steps_offset: int = 0,
|
| 551 |
+
prediction_type: str = "epsilon",
|
| 552 |
+
thresholding: bool = False,
|
| 553 |
+
dynamic_thresholding_ratio: float = 0.995,
|
| 554 |
+
clip_sample_range: float = 1.0,
|
| 555 |
+
sample_max_value: float = 1.0,
|
| 556 |
+
timestep_spacing: str = "leading",
|
| 557 |
+
rescale_betas_zero_snr: bool = False,
|
| 558 |
+
):
|
| 559 |
+
if trained_betas is not None:
|
| 560 |
+
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
|
| 561 |
+
elif beta_schedule == "linear":
|
| 562 |
+
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
|
| 563 |
+
elif beta_schedule == "scaled_linear":
|
| 564 |
+
# this schedule is very specific to the latent diffusion model.
|
| 565 |
+
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
|
| 566 |
+
elif beta_schedule == "squaredcos_cap_v2":
|
| 567 |
+
# Glide cosine schedule
|
| 568 |
+
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
| 569 |
+
else:
|
| 570 |
+
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
|
| 571 |
+
|
| 572 |
+
# Rescale for zero SNR
|
| 573 |
+
if rescale_betas_zero_snr:
|
| 574 |
+
self.betas = rescale_zero_terminal_snr(self.betas)
|
| 575 |
+
|
| 576 |
+
self.alphas = 1.0 - self.betas
|
| 577 |
+
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
|
| 578 |
+
|
| 579 |
+
# At every step in ddim, we are looking into the previous alphas_cumprod
|
| 580 |
+
# For the final step, there is no previous alphas_cumprod because we are already at 0
|
| 581 |
+
# `set_alpha_to_one` decides whether we set this parameter simply to one or
|
| 582 |
+
# whether we use the final alpha of the "non-previous" one.
|
| 583 |
+
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
|
| 584 |
+
|
| 585 |
+
# standard deviation of the initial noise distribution
|
| 586 |
+
self.init_noise_sigma = 1.0
|
| 587 |
+
|
| 588 |
+
# setable values
|
| 589 |
+
self.num_inference_steps = None
|
| 590 |
+
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
|
| 591 |
+
|
| 592 |
+
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
|
| 593 |
+
"""
|
| 594 |
+
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
| 595 |
+
current timestep.
|
| 596 |
+
Args:
|
| 597 |
+
sample (`torch.FloatTensor`):
|
| 598 |
+
The input sample.
|
| 599 |
+
timestep (`int`, *optional*):
|
| 600 |
+
The current timestep in the diffusion chain.
|
| 601 |
+
Returns:
|
| 602 |
+
`torch.FloatTensor`:
|
| 603 |
+
A scaled input sample.
|
| 604 |
+
"""
|
| 605 |
+
return sample
|
| 606 |
+
|
| 607 |
+
def _get_variance(self, timestep, prev_timestep):
|
| 608 |
+
alpha_prod_t = self.alphas_cumprod[timestep]
|
| 609 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
|
| 610 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 611 |
+
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 612 |
+
|
| 613 |
+
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
|
| 614 |
+
|
| 615 |
+
return variance
|
| 616 |
+
|
| 617 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
|
| 618 |
+
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
|
| 619 |
+
"""
|
| 620 |
+
"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
|
| 621 |
+
prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
|
| 622 |
+
s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
|
| 623 |
+
pixels from saturation at each step. We find that dynamic thresholding results in significantly better
|
| 624 |
+
photorealism as well as better image-text alignment, especially when using very large guidance weights."
|
| 625 |
+
https://arxiv.org/abs/2205.11487
|
| 626 |
+
"""
|
| 627 |
+
dtype = sample.dtype
|
| 628 |
+
batch_size, channels, height, width = sample.shape
|
| 629 |
+
|
| 630 |
+
if dtype not in (torch.float32, torch.float64):
|
| 631 |
+
sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
|
| 632 |
+
|
| 633 |
+
# Flatten sample for doing quantile calculation along each image
|
| 634 |
+
sample = sample.reshape(batch_size, channels * height * width)
|
| 635 |
+
|
| 636 |
+
abs_sample = sample.abs() # "a certain percentile absolute pixel value"
|
| 637 |
+
|
| 638 |
+
s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
|
| 639 |
+
s = torch.clamp(
|
| 640 |
+
s, min=1, max=self.config.sample_max_value
|
| 641 |
+
) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
|
| 642 |
+
|
| 643 |
+
s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
|
| 644 |
+
sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
|
| 645 |
+
|
| 646 |
+
sample = sample.reshape(batch_size, channels, height, width)
|
| 647 |
+
sample = sample.to(dtype)
|
| 648 |
+
|
| 649 |
+
return sample
|
| 650 |
+
|
| 651 |
+
def set_timesteps(
|
| 652 |
+
self, strength, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None
|
| 653 |
+
):
|
| 654 |
+
"""
|
| 655 |
+
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
|
| 656 |
+
Args:
|
| 657 |
+
num_inference_steps (`int`):
|
| 658 |
+
The number of diffusion steps used when generating samples with a pre-trained model.
|
| 659 |
+
"""
|
| 660 |
+
|
| 661 |
+
if num_inference_steps > self.config.num_train_timesteps:
|
| 662 |
+
raise ValueError(
|
| 663 |
+
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
|
| 664 |
+
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
|
| 665 |
+
f" maximal {self.config.num_train_timesteps} timesteps."
|
| 666 |
+
)
|
| 667 |
+
|
| 668 |
+
self.num_inference_steps = num_inference_steps
|
| 669 |
+
|
| 670 |
+
# LCM Timesteps Setting: # Linear Spacing
|
| 671 |
+
c = self.config.num_train_timesteps // lcm_origin_steps
|
| 672 |
+
lcm_origin_timesteps = (
|
| 673 |
+
np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
|
| 674 |
+
) # LCM Training Steps Schedule
|
| 675 |
+
skipping_step = len(lcm_origin_timesteps) // num_inference_steps
|
| 676 |
+
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
|
| 677 |
+
|
| 678 |
+
self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
|
| 679 |
+
|
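# Editorial aside, not part of the original scheduler: with the defaults used in this
# file (num_train_timesteps=1000, lcm_origin_steps=50, strength=1.0, num_inference_steps=4)
# the schedule built above works out to
#
#     c = 1000 // 50 = 20
#     lcm_origin_timesteps = [19, 39, ..., 999]    # the 50 timesteps LCM was trained on
#     skipping_step = 50 // 4 = 12
#     timesteps = [999, 759, 519, 279]             # every 12th one, noisiest first
#
# i.e. inference always starts from the noisiest trained timestep and strides evenly
# backwards through the LCM training schedule.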
| 680 |
+
def get_scalings_for_boundary_condition_discrete(self, t):
|
| 681 |
+
self.sigma_data = 0.5 # Default: 0.5
|
| 682 |
+
|
| 683 |
+
# By dividing 0.1: This is almost a delta function at t=0.
|
| 684 |
+
c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
|
| 685 |
+
c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
|
| 686 |
+
return c_skip, c_out
|
| 687 |
+
|
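# Editorial aside, not part of the original scheduler: these are the consistency-model
# boundary-condition scalings with sigma_data = 0.5 and the timestep rescaled by 1/0.1.
# Numerically,
#
#     t = 0   ->  c_skip = 1.0,      c_out = 0.0    (so f(x, 0) = x, the boundary condition)
#     t = 999 ->  c_skip ~ 2.5e-9,   c_out ~ 1.0    (the model's x0 prediction dominates)
#
# so the `denoised = c_out * pred_x0 + c_skip * sample` line in step() interpolates
# smoothly between the raw sample at t = 0 and the predicted x0 at large t.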
| 688 |
+
def step(
|
| 689 |
+
self,
|
| 690 |
+
model_output: torch.FloatTensor,
|
| 691 |
+
timeindex: int,
|
| 692 |
+
timestep: int,
|
| 693 |
+
sample: torch.FloatTensor,
|
| 694 |
+
eta: float = 0.0,
|
| 695 |
+
use_clipped_model_output: bool = False,
|
| 696 |
+
generator=None,
|
| 697 |
+
variance_noise: Optional[torch.FloatTensor] = None,
|
| 698 |
+
return_dict: bool = True,
|
| 699 |
+
) -> Union[LCMSchedulerOutput, Tuple]:
|
| 700 |
+
"""
|
| 701 |
+
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
|
| 702 |
+
process from the learned model outputs (most often the predicted noise).
|
| 703 |
+
Args:
|
| 704 |
+
model_output (`torch.FloatTensor`):
|
| 705 |
+
The direct output from learned diffusion model.
|
| 706 |
+
timestep (`float`):
|
| 707 |
+
The current discrete timestep in the diffusion chain.
|
| 708 |
+
sample (`torch.FloatTensor`):
|
| 709 |
+
A current instance of a sample created by the diffusion process.
|
| 710 |
+
eta (`float`):
|
| 711 |
+
The weight of noise for added noise in diffusion step.
|
| 712 |
+
use_clipped_model_output (`bool`, defaults to `False`):
|
| 713 |
+
If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
|
| 714 |
+
because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
|
| 715 |
+
clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
|
| 716 |
+
`use_clipped_model_output` has no effect.
|
| 717 |
+
generator (`torch.Generator`, *optional*):
|
| 718 |
+
A random number generator.
|
| 719 |
+
variance_noise (`torch.FloatTensor`):
|
| 720 |
+
Alternative to generating noise with `generator` by directly providing the noise for the variance
|
| 721 |
+
itself. Useful for methods such as [`CycleDiffusion`].
|
| 722 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 723 |
+
Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
|
| 724 |
+
Returns:
|
| 725 |
+
[`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
|
| 726 |
+
If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
|
| 727 |
+
tuple is returned where the first element is the sample tensor.
|
| 728 |
+
"""
|
| 729 |
+
if self.num_inference_steps is None:
|
| 730 |
+
raise ValueError(
|
| 731 |
+
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
| 732 |
+
)
|
| 733 |
+
|
| 734 |
+
# 1. get previous step value
|
| 735 |
+
prev_timeindex = timeindex + 1
|
| 736 |
+
if prev_timeindex < len(self.timesteps):
|
| 737 |
+
prev_timestep = self.timesteps[prev_timeindex]
|
| 738 |
+
else:
|
| 739 |
+
prev_timestep = timestep
|
| 740 |
+
|
| 741 |
+
# 2. compute alphas, betas
|
| 742 |
+
alpha_prod_t = self.alphas_cumprod[timestep]
|
| 743 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
|
| 744 |
+
|
| 745 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 746 |
+
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 747 |
+
|
| 748 |
+
# 3. Get scalings for boundary conditions
|
| 749 |
+
c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
|
| 750 |
+
|
| 751 |
+
# 4. Different Parameterization:
|
| 752 |
+
parameterization = self.config.prediction_type
|
| 753 |
+
|
| 754 |
+
if parameterization == "epsilon": # noise-prediction
|
| 755 |
+
pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
|
| 756 |
+
|
| 757 |
+
elif parameterization == "sample": # x-prediction
|
| 758 |
+
pred_x0 = model_output
|
| 759 |
+
|
| 760 |
+
elif parameterization == "v_prediction": # v-prediction
|
| 761 |
+
pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
|
| 762 |
+
|
| 763 |
+
# 4. Denoise model output using boundary conditions
|
| 764 |
+
denoised = c_out * pred_x0 + c_skip * sample
|
| 765 |
+
|
| 766 |
+
# 5. Sample z ~ N(0, I), For MultiStep Inference
|
| 767 |
+
# Noise is not used for one-step sampling.
|
| 768 |
+
if len(self.timesteps) > 1:
|
| 769 |
+
noise = torch.randn(model_output.shape).to(model_output.device)
|
| 770 |
+
prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
|
| 771 |
+
else:
|
| 772 |
+
prev_sample = denoised
|
| 773 |
+
|
| 774 |
+
if not return_dict:
|
| 775 |
+
return (prev_sample, denoised)
|
| 776 |
+
|
| 777 |
+
return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
|
| 778 |
+
|
| 779 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
|
| 780 |
+
def add_noise(
|
| 781 |
+
self,
|
| 782 |
+
original_samples: torch.FloatTensor,
|
| 783 |
+
noise: torch.FloatTensor,
|
| 784 |
+
timesteps: torch.IntTensor,
|
| 785 |
+
) -> torch.FloatTensor:
|
| 786 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
|
| 787 |
+
alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
|
| 788 |
+
timesteps = timesteps.to(original_samples.device)
|
| 789 |
+
|
| 790 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 791 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 792 |
+
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
| 793 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 794 |
+
|
| 795 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 796 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 797 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
| 798 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 799 |
+
|
| 800 |
+
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
| 801 |
+
return noisy_samples
|
| 802 |
+
|
| 803 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
|
| 804 |
+
def get_velocity(
|
| 805 |
+
self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
|
| 806 |
+
) -> torch.FloatTensor:
|
| 807 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as sample
|
| 808 |
+
alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
|
| 809 |
+
timesteps = timesteps.to(sample.device)
|
| 810 |
+
|
| 811 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 812 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 813 |
+
while len(sqrt_alpha_prod.shape) < len(sample.shape):
|
| 814 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 815 |
+
|
| 816 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 817 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 818 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
|
| 819 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 820 |
+
|
| 821 |
+
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
|
| 822 |
+
return velocity
|
| 823 |
+
|
| 824 |
+
def __len__(self):
|
| 825 |
+
return self.config.num_train_timesteps
|
v0.27.0/latent_consistency_interpolate.py
ADDED
|
@@ -0,0 +1,990 @@
|
| 1 |
+
import inspect
|
| 2 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 7 |
+
|
| 8 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 9 |
+
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 10 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 11 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 12 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 13 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
|
| 14 |
+
from diffusers.schedulers import LCMScheduler
|
| 15 |
+
from diffusers.utils import (
|
| 16 |
+
USE_PEFT_BACKEND,
|
| 17 |
+
deprecate,
|
| 18 |
+
logging,
|
| 19 |
+
replace_example_docstring,
|
| 20 |
+
scale_lora_layers,
|
| 21 |
+
unscale_lora_layers,
|
| 22 |
+
)
|
| 23 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 27 |
+
|
| 28 |
+
EXAMPLE_DOC_STRING = """
|
| 29 |
+
Examples:
|
| 30 |
+
```py
|
| 31 |
+
>>> import torch
|
| 32 |
+
>>> import numpy as np
|
| 33 |
+
|
| 34 |
+
>>> from diffusers import DiffusionPipeline
|
| 35 |
+
|
| 36 |
+
>>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_interpolate")
|
| 37 |
+
>>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
|
| 38 |
+
>>> pipe.to(torch_device="cuda", torch_dtype=torch.float32)
|
| 39 |
+
|
| 40 |
+
>>> prompts = ["A cat", "A dog", "A horse"]
|
| 41 |
+
>>> num_inference_steps = 4
|
| 42 |
+
>>> num_interpolation_steps = 24
|
| 43 |
+
>>> seed = 1337
|
| 44 |
+
|
| 45 |
+
>>> torch.manual_seed(seed)
|
| 46 |
+
>>> np.random.seed(seed)
|
| 47 |
+
|
| 48 |
+
>>> images = pipe(
|
| 49 |
+
prompt=prompts,
|
| 50 |
+
height=512,
|
| 51 |
+
width=512,
|
| 52 |
+
num_inference_steps=num_inference_steps,
|
| 53 |
+
num_interpolation_steps=num_interpolation_steps,
|
| 54 |
+
guidance_scale=8.0,
|
| 55 |
+
embedding_interpolation_type="lerp",
|
| 56 |
+
latent_interpolation_type="slerp",
|
| 57 |
+
process_batch_size=4, # Make it higher or lower based on your GPU memory
|
| 58 |
+
generator=torch.Generator().manual_seed(seed),
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
>>> # Save the images as a video
|
| 62 |
+
>>> import imageio
|
| 63 |
+
>>> from PIL import Image
|
| 64 |
+
|
| 65 |
+
>>> def pil_to_video(images: List[Image.Image], filename: str, fps: int = 60) -> None:
|
| 66 |
+
frames = [np.array(image) for image in images]
|
| 67 |
+
with imageio.get_writer(filename, fps=fps) as video_writer:
|
| 68 |
+
for frame in frames:
|
| 69 |
+
video_writer.append_data(frame)
|
| 70 |
+
|
| 71 |
+
>>> pil_to_video(images, "lcm_interpolate.mp4", fps=24)
|
| 72 |
+
```
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def lerp(
|
| 77 |
+
v0: Union[torch.Tensor, np.ndarray],
|
| 78 |
+
v1: Union[torch.Tensor, np.ndarray],
|
| 79 |
+
t: Union[float, torch.Tensor, np.ndarray],
|
| 80 |
+
) -> Union[torch.Tensor, np.ndarray]:
|
| 81 |
+
"""
|
| 82 |
+
Linearly interpolate between two vectors/tensors.
|
| 83 |
+
|
| 84 |
+
Args:
|
| 85 |
+
v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
|
| 86 |
+
v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
|
| 87 |
+
t: (`float`, `torch.Tensor`, or `np.ndarray`):
|
| 88 |
+
Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
|
| 89 |
+
torch.Tensor, must be one dimensional with values between 0 and 1.
|
| 90 |
+
|
| 91 |
+
Returns:
|
| 92 |
+
Union[torch.Tensor, np.ndarray]
|
| 93 |
+
Interpolated vector/tensor between v0 and v1.
|
| 94 |
+
"""
|
| 95 |
+
inputs_are_torch = False
|
| 96 |
+
t_is_float = False
|
| 97 |
+
|
| 98 |
+
if isinstance(v0, torch.Tensor):
|
| 99 |
+
inputs_are_torch = True
|
| 100 |
+
input_device = v0.device
|
| 101 |
+
v0 = v0.cpu().numpy()
|
| 102 |
+
v1 = v1.cpu().numpy()
|
| 103 |
+
|
| 104 |
+
if isinstance(t, torch.Tensor):
|
| 105 |
+
inputs_are_torch = True
|
| 106 |
+
input_device = t.device
|
| 107 |
+
t = t.cpu().numpy()
|
| 108 |
+
elif isinstance(t, float):
|
| 109 |
+
t_is_float = True
|
| 110 |
+
t = np.array([t])
|
| 111 |
+
|
| 112 |
+
t = t[..., None]
|
| 113 |
+
v0 = v0[None, ...]
|
| 114 |
+
v1 = v1[None, ...]
|
| 115 |
+
v2 = (1 - t) * v0 + t * v1
|
| 116 |
+
|
| 117 |
+
if t_is_float and v0.ndim > 1:
|
| 118 |
+
assert v2.shape[0] == 1
|
| 119 |
+
v2 = np.squeeze(v2, axis=0)
|
| 120 |
+
if inputs_are_torch:
|
| 121 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 122 |
+
|
| 123 |
+
return v2
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def slerp(
|
| 127 |
+
v0: Union[torch.Tensor, np.ndarray],
|
| 128 |
+
v1: Union[torch.Tensor, np.ndarray],
|
| 129 |
+
t: Union[float, torch.Tensor, np.ndarray],
|
| 130 |
+
DOT_THRESHOLD=0.9995,
|
| 131 |
+
) -> Union[torch.Tensor, np.ndarray]:
|
| 132 |
+
"""
|
| 133 |
+
Spherical linear interpolation between two vectors/tensors.
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
|
| 137 |
+
v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
|
| 138 |
+
t: (`float`, `torch.Tensor`, or `np.ndarray`):
|
| 139 |
+
Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
|
| 140 |
+
torch.Tensor, must be one dimensional with values between 0 and 1.
|
| 141 |
+
DOT_THRESHOLD (`float`, *optional*, default=0.9995):
|
| 142 |
+
Threshold for when to use linear interpolation instead of spherical interpolation.
|
| 143 |
+
|
| 144 |
+
Returns:
|
| 145 |
+
`torch.Tensor` or `np.ndarray`:
|
| 146 |
+
Interpolated vector/tensor between v0 and v1.
|
| 147 |
+
"""
|
| 148 |
+
inputs_are_torch = False
|
| 149 |
+
t_is_float = False
|
| 150 |
+
|
| 151 |
+
if isinstance(v0, torch.Tensor):
|
| 152 |
+
inputs_are_torch = True
|
| 153 |
+
input_device = v0.device
|
| 154 |
+
v0 = v0.cpu().numpy()
|
| 155 |
+
v1 = v1.cpu().numpy()
|
| 156 |
+
|
| 157 |
+
if isinstance(t, torch.Tensor):
|
| 158 |
+
inputs_are_torch = True
|
| 159 |
+
input_device = t.device
|
| 160 |
+
t = t.cpu().numpy()
|
| 161 |
+
elif isinstance(t, float):
|
| 162 |
+
t_is_float = True
|
| 163 |
+
t = np.array([t], dtype=v0.dtype)
|
| 164 |
+
|
| 165 |
+
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
| 166 |
+
if np.abs(dot) > DOT_THRESHOLD:
|
| 167 |
+
# v0 and v1 are close to parallel
|
| 168 |
+
# Use linear interpolation instead
|
| 169 |
+
v2 = lerp(v0, v1, t)
|
| 170 |
+
else:
|
| 171 |
+
theta_0 = np.arccos(dot)
|
| 172 |
+
sin_theta_0 = np.sin(theta_0)
|
| 173 |
+
theta_t = theta_0 * t
|
| 174 |
+
sin_theta_t = np.sin(theta_t)
|
| 175 |
+
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
| 176 |
+
s1 = sin_theta_t / sin_theta_0
|
| 177 |
+
s0 = s0[..., None]
|
| 178 |
+
s1 = s1[..., None]
|
| 179 |
+
v0 = v0[None, ...]
|
| 180 |
+
v1 = v1[None, ...]
|
| 181 |
+
v2 = s0 * v0 + s1 * v1
|
| 182 |
+
|
| 183 |
+
if t_is_float and v0.ndim > 1:
|
| 184 |
+
assert v2.shape[0] == 1
|
| 185 |
+
v2 = np.squeeze(v2, axis=0)
|
| 186 |
+
if inputs_are_torch:
|
| 187 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 188 |
+
|
| 189 |
+
return v2
|
| 190 |
+
|
| 191 |
+
|
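# Editorial sketch, not part of the original file: a quick comparison of the two
# helpers above. slerp follows the arc between the two directions (preserving the
# norm of unit vectors), while lerp follows the straight chord between them.
def _compare_interpolators() -> None:
    v0 = torch.tensor([1.0, 0.0])
    v1 = torch.tensor([0.0, 1.0])
    print(lerp(v0, v1, 0.5))   # [0.5, 0.5]
    print(slerp(v0, v1, 0.5))  # ~[0.7071, 0.7071], still unit length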
| 192 |
+
class LatentConsistencyModelWalkPipeline(
|
| 193 |
+
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
|
| 194 |
+
):
|
| 195 |
+
r"""
|
| 196 |
+
Pipeline for text-to-image generation using a latent consistency model.
|
| 197 |
+
|
| 198 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 199 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 200 |
+
|
| 201 |
+
The pipeline also inherits the following loading methods:
|
| 202 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 203 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 204 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 205 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 206 |
+
|
| 207 |
+
Args:
|
| 208 |
+
vae ([`AutoencoderKL`]):
|
| 209 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 210 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 211 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 212 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 213 |
+
A `CLIPTokenizer` to tokenize text.
|
| 214 |
+
unet ([`UNet2DConditionModel`]):
|
| 215 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 216 |
+
scheduler ([`SchedulerMixin`]):
|
| 217 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only
|
| 218 |
+
supports [`LCMScheduler`].
|
| 219 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 220 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 221 |
+
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
| 222 |
+
about a model's potential harms.
|
| 223 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 224 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 225 |
+
requires_safety_checker (`bool`, *optional*, defaults to `True`):
|
| 226 |
+
Whether the pipeline requires a safety checker component.
|
| 227 |
+
"""
|
| 228 |
+
|
| 229 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 230 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 231 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 232 |
+
_callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"]
|
| 233 |
+
|
| 234 |
+
def __init__(
|
| 235 |
+
self,
|
| 236 |
+
vae: AutoencoderKL,
|
| 237 |
+
text_encoder: CLIPTextModel,
|
| 238 |
+
tokenizer: CLIPTokenizer,
|
| 239 |
+
unet: UNet2DConditionModel,
|
| 240 |
+
scheduler: LCMScheduler,
|
| 241 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 242 |
+
feature_extractor: CLIPImageProcessor,
|
| 243 |
+
requires_safety_checker: bool = True,
|
| 244 |
+
):
|
| 245 |
+
super().__init__()
|
| 246 |
+
|
| 247 |
+
if safety_checker is None and requires_safety_checker:
|
| 248 |
+
logger.warning(
|
| 249 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 250 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 251 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 252 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 253 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 254 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
if safety_checker is not None and feature_extractor is None:
|
| 258 |
+
raise ValueError(
|
| 259 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 260 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
self.register_modules(
|
| 264 |
+
vae=vae,
|
| 265 |
+
text_encoder=text_encoder,
|
| 266 |
+
tokenizer=tokenizer,
|
| 267 |
+
unet=unet,
|
| 268 |
+
scheduler=scheduler,
|
| 269 |
+
safety_checker=safety_checker,
|
| 270 |
+
feature_extractor=feature_extractor,
|
| 271 |
+
)
|
| 272 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 273 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 274 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 275 |
+
|
| 276 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
|
| 277 |
+
def encode_prompt(
|
| 278 |
+
self,
|
| 279 |
+
prompt,
|
| 280 |
+
device,
|
| 281 |
+
num_images_per_prompt,
|
| 282 |
+
do_classifier_free_guidance,
|
| 283 |
+
negative_prompt=None,
|
| 284 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 285 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 286 |
+
lora_scale: Optional[float] = None,
|
| 287 |
+
clip_skip: Optional[int] = None,
|
| 288 |
+
):
|
| 289 |
+
r"""
|
| 290 |
+
Encodes the prompt into text encoder hidden states.
|
| 291 |
+
|
| 292 |
+
Args:
|
| 293 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 294 |
+
prompt to be encoded
|
| 295 |
+
device: (`torch.device`):
|
| 296 |
+
torch device
|
| 297 |
+
num_images_per_prompt (`int`):
|
| 298 |
+
number of images that should be generated per prompt
|
| 299 |
+
do_classifier_free_guidance (`bool`):
|
| 300 |
+
whether to use classifier free guidance or not
|
| 301 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 302 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 303 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 304 |
+
less than `1`).
|
| 305 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 306 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 307 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 308 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 309 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 310 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 311 |
+
argument.
|
| 312 |
+
lora_scale (`float`, *optional*):
|
| 313 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 314 |
+
clip_skip (`int`, *optional*):
|
| 315 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 316 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 317 |
+
"""
|
| 318 |
+
# set lora scale so that monkey patched LoRA
|
| 319 |
+
# function of text encoder can correctly access it
|
| 320 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 321 |
+
self._lora_scale = lora_scale
|
| 322 |
+
|
| 323 |
+
# dynamically adjust the LoRA scale
|
| 324 |
+
if not USE_PEFT_BACKEND:
|
| 325 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 326 |
+
else:
|
| 327 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 328 |
+
|
| 329 |
+
if prompt is not None and isinstance(prompt, str):
|
| 330 |
+
batch_size = 1
|
| 331 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 332 |
+
batch_size = len(prompt)
|
| 333 |
+
else:
|
| 334 |
+
batch_size = prompt_embeds.shape[0]
|
| 335 |
+
|
| 336 |
+
if prompt_embeds is None:
|
| 337 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 338 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 339 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 340 |
+
|
| 341 |
+
text_inputs = self.tokenizer(
|
| 342 |
+
prompt,
|
| 343 |
+
padding="max_length",
|
| 344 |
+
max_length=self.tokenizer.model_max_length,
|
| 345 |
+
truncation=True,
|
| 346 |
+
return_tensors="pt",
|
| 347 |
+
)
|
| 348 |
+
text_input_ids = text_inputs.input_ids
|
| 349 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 350 |
+
|
| 351 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 352 |
+
text_input_ids, untruncated_ids
|
| 353 |
+
):
|
| 354 |
+
removed_text = self.tokenizer.batch_decode(
|
| 355 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 356 |
+
)
|
| 357 |
+
logger.warning(
|
| 358 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 359 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 363 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 364 |
+
else:
|
| 365 |
+
attention_mask = None
|
| 366 |
+
|
| 367 |
+
if clip_skip is None:
|
| 368 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 369 |
+
prompt_embeds = prompt_embeds[0]
|
| 370 |
+
else:
|
| 371 |
+
prompt_embeds = self.text_encoder(
|
| 372 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 373 |
+
)
|
| 374 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 375 |
+
# all the hidden states from the encoder layers. Then index into
|
| 376 |
+
# the tuple to access the hidden states from the desired layer.
|
| 377 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 378 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 379 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 380 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 381 |
+
# layer.
|
| 382 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 383 |
+
|
| 384 |
+
if self.text_encoder is not None:
|
| 385 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 386 |
+
elif self.unet is not None:
|
| 387 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 388 |
+
else:
|
| 389 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 390 |
+
|
| 391 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 392 |
+
|
| 393 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 394 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 395 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 396 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 397 |
+
|
| 398 |
+
# get unconditional embeddings for classifier free guidance
|
| 399 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 400 |
+
uncond_tokens: List[str]
|
| 401 |
+
if negative_prompt is None:
|
| 402 |
+
uncond_tokens = [""] * batch_size
|
| 403 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 404 |
+
raise TypeError(
|
| 405 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 406 |
+
f" {type(prompt)}."
|
| 407 |
+
)
|
| 408 |
+
elif isinstance(negative_prompt, str):
|
| 409 |
+
uncond_tokens = [negative_prompt]
|
| 410 |
+
elif batch_size != len(negative_prompt):
|
| 411 |
+
raise ValueError(
|
| 412 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 413 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 414 |
+
" the batch size of `prompt`."
|
| 415 |
+
)
|
| 416 |
+
else:
|
| 417 |
+
uncond_tokens = negative_prompt
|
| 418 |
+
|
| 419 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 420 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 421 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 422 |
+
|
| 423 |
+
max_length = prompt_embeds.shape[1]
|
| 424 |
+
uncond_input = self.tokenizer(
|
| 425 |
+
uncond_tokens,
|
| 426 |
+
padding="max_length",
|
| 427 |
+
max_length=max_length,
|
| 428 |
+
truncation=True,
|
| 429 |
+
return_tensors="pt",
|
| 430 |
+
)
|
| 431 |
+
|
| 432 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 433 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 434 |
+
else:
|
| 435 |
+
attention_mask = None
|
| 436 |
+
|
| 437 |
+
negative_prompt_embeds = self.text_encoder(
|
| 438 |
+
uncond_input.input_ids.to(device),
|
| 439 |
+
attention_mask=attention_mask,
|
| 440 |
+
)
|
| 441 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 442 |
+
|
| 443 |
+
if do_classifier_free_guidance:
|
| 444 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 445 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 446 |
+
|
| 447 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 448 |
+
|
| 449 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 450 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 451 |
+
|
| 452 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 453 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 454 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 455 |
+
|
| 456 |
+
return prompt_embeds, negative_prompt_embeds
|
| 457 |
+
|
| 458 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
| 459 |
+
def run_safety_checker(self, image, device, dtype):
|
| 460 |
+
if self.safety_checker is None:
|
| 461 |
+
has_nsfw_concept = None
|
| 462 |
+
else:
|
| 463 |
+
if torch.is_tensor(image):
|
| 464 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 465 |
+
else:
|
| 466 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 467 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 468 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 469 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 470 |
+
)
|
| 471 |
+
return image, has_nsfw_concept
|
| 472 |
+
|
| 473 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 474 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 475 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 476 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 477 |
+
raise ValueError(
|
| 478 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 479 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 480 |
+
)
|
| 481 |
+
|
| 482 |
+
if latents is None:
|
| 483 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 484 |
+
else:
|
| 485 |
+
latents = latents.to(device)
|
| 486 |
+
|
| 487 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 488 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 489 |
+
return latents
|
| 490 |
+
|
| 491 |
+
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 492 |
+
"""
|
| 493 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 494 |
+
|
| 495 |
+
Args:
|
| 496 |
+
w (`torch.Tensor`):
|
| 497 |
+
generate embedding vectors for these guidance scale values
|
| 498 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 499 |
+
dimension of the embeddings to generate
|
| 500 |
+
dtype:
|
| 501 |
+
data type of the generated embeddings
|
| 502 |
+
|
| 503 |
+
Returns:
|
| 504 |
+
`torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
|
| 505 |
+
"""
|
| 506 |
+
assert len(w.shape) == 1
|
| 507 |
+
w = w * 1000.0
|
| 508 |
+
|
| 509 |
+
half_dim = embedding_dim // 2
|
| 510 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 511 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 512 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 513 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 514 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 515 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 516 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 517 |
+
return emb
|
| 518 |
+
|
| 519 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 520 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 521 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 522 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 523 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 524 |
+
# and should be between [0, 1]
|
| 525 |
+
|
| 526 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 527 |
+
extra_step_kwargs = {}
|
| 528 |
+
if accepts_eta:
|
| 529 |
+
extra_step_kwargs["eta"] = eta
|
| 530 |
+
|
| 531 |
+
# check if the scheduler accepts generator
|
| 532 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 533 |
+
if accepts_generator:
|
| 534 |
+
extra_step_kwargs["generator"] = generator
|
| 535 |
+
return extra_step_kwargs
|
| 536 |
+
|
| 537 |
+
# Based on StableDiffusionPipeline.check_inputs, with the negative-prompt handling removed
|
| 538 |
+
def check_inputs(
|
| 539 |
+
self,
|
| 540 |
+
prompt: Union[str, List[str]],
|
| 541 |
+
height: int,
|
| 542 |
+
width: int,
|
| 543 |
+
callback_steps: int,
|
| 544 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 545 |
+
callback_on_step_end_tensor_inputs=None,
|
| 546 |
+
):
|
| 547 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 548 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 549 |
+
|
| 550 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 551 |
+
raise ValueError(
|
| 552 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 553 |
+
f" {type(callback_steps)}."
|
| 554 |
+
)
|
| 555 |
+
|
| 556 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 557 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 558 |
+
):
|
| 559 |
+
raise ValueError(
|
| 560 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 561 |
+
)
|
| 562 |
+
|
| 563 |
+
if prompt is not None and prompt_embeds is not None:
|
| 564 |
+
raise ValueError(
|
| 565 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 566 |
+
" only forward one of the two."
|
| 567 |
+
)
|
| 568 |
+
elif prompt is None and prompt_embeds is None:
|
| 569 |
+
raise ValueError(
|
| 570 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 571 |
+
)
|
| 572 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 573 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 574 |
+
|
| 575 |
+
@torch.no_grad()
|
| 576 |
+
def interpolate_embedding(
|
| 577 |
+
self,
|
| 578 |
+
start_embedding: torch.FloatTensor,
|
| 579 |
+
end_embedding: torch.FloatTensor,
|
| 580 |
+
num_interpolation_steps: Union[int, List[int]],
|
| 581 |
+
interpolation_type: str,
|
| 582 |
+
) -> torch.FloatTensor:
|
| 583 |
+
if interpolation_type == "lerp":
|
| 584 |
+
interpolation_fn = lerp
|
| 585 |
+
elif interpolation_type == "slerp":
|
| 586 |
+
interpolation_fn = slerp
|
| 587 |
+
else:
|
| 588 |
+
raise ValueError(
|
| 589 |
+
f"embedding_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}."
|
| 590 |
+
)
|
| 591 |
+
|
| 592 |
+
embedding = torch.cat([start_embedding, end_embedding])
|
| 593 |
+
steps = torch.linspace(0, 1, num_interpolation_steps, dtype=embedding.dtype).cpu().numpy()
|
| 594 |
+
steps = np.expand_dims(steps, axis=tuple(range(1, embedding.ndim)))
|
| 595 |
+
interpolations = []
|
| 596 |
+
|
| 597 |
+
# Interpolate between text embeddings
|
| 598 |
+
# TODO(aryan): Think of a better way of doing this
|
| 599 |
+
# See if it can be done parallelly instead
|
| 600 |
+
for i in range(embedding.shape[0] - 1):
|
| 601 |
+
interpolations.append(interpolation_fn(embedding[i], embedding[i + 1], steps).squeeze(dim=1))
|
| 602 |
+
|
| 603 |
+
interpolations = torch.cat(interpolations)
|
| 604 |
+
return interpolations
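# NOTE (added comment, not part of the uploaded file): `lerp` and `slerp` are interpolation helpers
# defined earlier in this file. As a shape sketch, assuming SD 1.x text embeddings of shape
# (1, 77, 768) and num_interpolation_steps=8, the concatenated input is (2, 77, 768) and the
# returned interpolations have shape (8, 77, 768), one embedding per interpolation step.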
|
| 605 |
+
|
| 606 |
+
@torch.no_grad()
|
| 607 |
+
def interpolate_latent(
|
| 608 |
+
self,
|
| 609 |
+
start_latent: torch.FloatTensor,
|
| 610 |
+
end_latent: torch.FloatTensor,
|
| 611 |
+
num_interpolation_steps: Union[int, List[int]],
|
| 612 |
+
interpolation_type: str,
|
| 613 |
+
) -> torch.FloatTensor:
|
| 614 |
+
if interpolation_type == "lerp":
|
| 615 |
+
interpolation_fn = lerp
|
| 616 |
+
elif interpolation_type == "slerp":
|
| 617 |
+
interpolation_fn = slerp
|
| 618 |
+
|
| 619 |
+
latent = torch.cat([start_latent, end_latent])
|
| 620 |
+
steps = torch.linspace(0, 1, num_interpolation_steps, dtype=latent.dtype).cpu().numpy()
|
| 621 |
+
steps = np.expand_dims(steps, axis=tuple(range(1, latent.ndim)))
|
| 622 |
+
interpolations = []
|
| 623 |
+
|
| 624 |
+
# Interpolate between latents
|
| 625 |
+
# TODO: Think of a better way of doing this
|
| 626 |
+
# See if it can be done parallelly instead
|
| 627 |
+
for i in range(latent.shape[0] - 1):
|
| 628 |
+
interpolations.append(interpolation_fn(latent[i], latent[i + 1], steps).squeeze(dim=1))
|
| 629 |
+
|
| 630 |
+
return torch.cat(interpolations)
|
| 631 |
+
|
| 632 |
+
@property
|
| 633 |
+
def guidance_scale(self):
|
| 634 |
+
return self._guidance_scale
|
| 635 |
+
|
| 636 |
+
@property
|
| 637 |
+
def cross_attention_kwargs(self):
|
| 638 |
+
return self._cross_attention_kwargs
|
| 639 |
+
|
| 640 |
+
@property
|
| 641 |
+
def clip_skip(self):
|
| 642 |
+
return self._clip_skip
|
| 643 |
+
|
| 644 |
+
@property
|
| 645 |
+
def num_timesteps(self):
|
| 646 |
+
return self._num_timesteps
|
| 647 |
+
|
| 648 |
+
@torch.no_grad()
|
| 649 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 650 |
+
def __call__(
|
| 651 |
+
self,
|
| 652 |
+
prompt: Union[str, List[str]] = None,
|
| 653 |
+
height: Optional[int] = None,
|
| 654 |
+
width: Optional[int] = None,
|
| 655 |
+
num_inference_steps: int = 4,
|
| 656 |
+
num_interpolation_steps: int = 8,
|
| 657 |
+
original_inference_steps: int = None,
|
| 658 |
+
guidance_scale: float = 8.5,
|
| 659 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 660 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 661 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 662 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 663 |
+
output_type: Optional[str] = "pil",
|
| 664 |
+
return_dict: bool = True,
|
| 665 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 666 |
+
clip_skip: Optional[int] = None,
|
| 667 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 668 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 669 |
+
embedding_interpolation_type: str = "lerp",
|
| 670 |
+
latent_interpolation_type: str = "slerp",
|
| 671 |
+
process_batch_size: int = 4,
|
| 672 |
+
**kwargs,
|
| 673 |
+
):
|
| 674 |
+
r"""
|
| 675 |
+
The call function to the pipeline for generation.
|
| 676 |
+
|
| 677 |
+
Args:
|
| 678 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 679 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 680 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 681 |
+
The height in pixels of the generated image.
|
| 682 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 683 |
+
The width in pixels of the generated image.
|
| 684 |
+
num_inference_steps (`int`, *optional*, defaults to 4):
|
| 685 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 686 |
+
expense of slower inference.
|
| 687 |
+
original_inference_steps (`int`, *optional*):
|
| 688 |
+
The original number of inference steps used to generate a linearly-spaced timestep schedule, from which
|
| 689 |
+
we will draw `num_inference_steps` evenly spaced timesteps as our final timestep schedule,
|
| 690 |
+
following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
|
| 691 |
+
scheduler's `original_inference_steps` attribute.
|
| 692 |
+
guidance_scale (`float`, *optional*, defaults to 8.5):
|
| 693 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 694 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 695 |
+
Note that the original latent consistency models paper uses a different CFG formulation where the
|
| 696 |
+
guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale >
|
| 697 |
+
0`).
|
| 698 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 699 |
+
The number of images to generate per prompt.
|
| 700 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 701 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 702 |
+
generation deterministic.
|
| 703 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 704 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 705 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 706 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 707 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 708 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 709 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 710 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 711 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 712 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 713 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 714 |
+
plain tuple.
|
| 715 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 716 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 717 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 718 |
+
clip_skip (`int`, *optional*):
|
| 719 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 720 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 721 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 722 |
+
A function that is called at the end of each denoising step during inference. It is called
|
| 723 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 724 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 725 |
+
`callback_on_step_end_tensor_inputs`.
|
| 726 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 727 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 728 |
+
will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 729 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 730 |
+
embedding_interpolation_type (`str`, *optional*, defaults to `"lerp"`):
|
| 731 |
+
The type of interpolation to use for interpolating between text embeddings. Choose between `"lerp"` and `"slerp"`.
|
| 732 |
+
latent_interpolation_type (`str`, *optional*, defaults to `"slerp"`):
|
| 733 |
+
The type of interpolation to use for interpolating between latents. Choose between `"lerp"` and `"slerp"`.
|
| 734 |
+
process_batch_size (`int`, *optional*, defaults to 4):
|
| 735 |
+
The batch size to use for processing the images. This is useful when generating a large number of images
|
| 736 |
+
and you want to avoid running out of memory.
|
| 737 |
+
|
| 738 |
+
Examples:
|
| 739 |
+
|
| 740 |
+
Returns:
|
| 741 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 742 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 743 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 744 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 745 |
+
"not-safe-for-work" (nsfw) content.
|
| 746 |
+
"""
|
| 747 |
+
|
| 748 |
+
callback = kwargs.pop("callback", None)
|
| 749 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 750 |
+
|
| 751 |
+
if callback is not None:
|
| 752 |
+
deprecate(
|
| 753 |
+
"callback",
|
| 754 |
+
"1.0.0",
|
| 755 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 756 |
+
)
|
| 757 |
+
if callback_steps is not None:
|
| 758 |
+
deprecate(
|
| 759 |
+
"callback_steps",
|
| 760 |
+
"1.0.0",
|
| 761 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 762 |
+
)
|
| 763 |
+
|
| 764 |
+
# 0. Default height and width to unet
|
| 765 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 766 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 767 |
+
|
| 768 |
+
# 1. Check inputs. Raise error if not correct
|
| 769 |
+
self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, callback_on_step_end_tensor_inputs)
|
| 770 |
+
self._guidance_scale = guidance_scale
|
| 771 |
+
self._clip_skip = clip_skip
|
| 772 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 773 |
+
|
| 774 |
+
# 2. Define call parameters
|
| 775 |
+
if prompt is not None and isinstance(prompt, str):
|
| 776 |
+
batch_size = 1
|
| 777 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 778 |
+
batch_size = len(prompt)
|
| 779 |
+
else:
|
| 780 |
+
batch_size = prompt_embeds.shape[0]
|
| 781 |
+
if batch_size < 2:
|
| 782 |
+
raise ValueError(f"`prompt` must have length of atleast 2 but found {batch_size}")
|
| 783 |
+
if num_images_per_prompt != 1:
|
| 784 |
+
raise ValueError("`num_images_per_prompt` must be `1` as no other value is supported yet")
|
| 785 |
+
if prompt_embeds is not None:
|
| 786 |
+
raise ValueError("`prompt_embeds` must be None since it is not supported yet")
|
| 787 |
+
if latents is not None:
|
| 788 |
+
raise ValueError("`latents` must be None since it is not supported yet")
|
| 789 |
+
|
| 790 |
+
device = self._execution_device
|
| 791 |
+
# do_classifier_free_guidance = guidance_scale > 1.0
|
| 792 |
+
|
| 793 |
+
lora_scale = (
|
| 794 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 795 |
+
)
|
| 796 |
+
|
| 797 |
+
self.scheduler.set_timesteps(num_inference_steps, device, original_inference_steps=original_inference_steps)
|
| 798 |
+
timesteps = self.scheduler.timesteps
|
| 799 |
+
num_channels_latents = self.unet.config.in_channels
|
| 800 |
+
# bs = batch_size * num_images_per_prompt
|
| 801 |
+
|
| 802 |
+
# 3. Encode initial input prompt
|
| 803 |
+
prompt_embeds_1, _ = self.encode_prompt(
|
| 804 |
+
prompt[:1],
|
| 805 |
+
device,
|
| 806 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 807 |
+
do_classifier_free_guidance=False,
|
| 808 |
+
negative_prompt=None,
|
| 809 |
+
prompt_embeds=prompt_embeds,
|
| 810 |
+
negative_prompt_embeds=None,
|
| 811 |
+
lora_scale=lora_scale,
|
| 812 |
+
clip_skip=self.clip_skip,
|
| 813 |
+
)
|
| 814 |
+
|
| 815 |
+
# 4. Prepare initial latent variables
|
| 816 |
+
latents_1 = self.prepare_latents(
|
| 817 |
+
1,
|
| 818 |
+
num_channels_latents,
|
| 819 |
+
height,
|
| 820 |
+
width,
|
| 821 |
+
prompt_embeds_1.dtype,
|
| 822 |
+
device,
|
| 823 |
+
generator,
|
| 824 |
+
latents,
|
| 825 |
+
)
|
| 826 |
+
|
| 827 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None)
|
| 828 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 829 |
+
self._num_timesteps = len(timesteps)
|
| 830 |
+
images = []
|
| 831 |
+
|
| 832 |
+
# 5. Iterate over prompts and perform latent walk. Note that we do this two prompts at a time
|
| 833 |
+
# otherwise the memory usage ends up being too high.
|
| 834 |
+
with self.progress_bar(total=batch_size - 1) as prompt_progress_bar:
|
| 835 |
+
for i in range(1, batch_size):
|
| 836 |
+
# 6. Encode current prompt
|
| 837 |
+
prompt_embeds_2, _ = self.encode_prompt(
|
| 838 |
+
prompt[i : i + 1],
|
| 839 |
+
device,
|
| 840 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 841 |
+
do_classifier_free_guidance=False,
|
| 842 |
+
negative_prompt=None,
|
| 843 |
+
prompt_embeds=prompt_embeds,
|
| 844 |
+
negative_prompt_embeds=None,
|
| 845 |
+
lora_scale=lora_scale,
|
| 846 |
+
clip_skip=self.clip_skip,
|
| 847 |
+
)
|
| 848 |
+
|
| 849 |
+
# 7. Prepare current latent variables
|
| 850 |
+
latents_2 = self.prepare_latents(
|
| 851 |
+
1,
|
| 852 |
+
num_channels_latents,
|
| 853 |
+
height,
|
| 854 |
+
width,
|
| 855 |
+
prompt_embeds_2.dtype,
|
| 856 |
+
device,
|
| 857 |
+
generator,
|
| 858 |
+
latents,
|
| 859 |
+
)
|
| 860 |
+
|
| 861 |
+
# 8. Interpolate between previous and current prompt embeddings and latents
|
| 862 |
+
inference_embeddings = self.interpolate_embedding(
|
| 863 |
+
start_embedding=prompt_embeds_1,
|
| 864 |
+
end_embedding=prompt_embeds_2,
|
| 865 |
+
num_interpolation_steps=num_interpolation_steps,
|
| 866 |
+
interpolation_type=embedding_interpolation_type,
|
| 867 |
+
)
|
| 868 |
+
inference_latents = self.interpolate_latent(
|
| 869 |
+
start_latent=latents_1,
|
| 870 |
+
end_latent=latents_2,
|
| 871 |
+
num_interpolation_steps=num_interpolation_steps,
|
| 872 |
+
interpolation_type=latent_interpolation_type,
|
| 873 |
+
)
|
| 874 |
+
next_prompt_embeds = inference_embeddings[-1:].detach().clone()
|
| 875 |
+
next_latents = inference_latents[-1:].detach().clone()
|
| 876 |
+
bs = num_interpolation_steps
|
| 877 |
+
|
| 878 |
+
# 9. Perform inference in batches. Note the use of `process_batch_size` to control the batch size
|
| 879 |
+
# of the inference. This is useful for reducing memory usage and can be configured based on the
|
| 880 |
+
# available GPU memory.
|
| 881 |
+
with self.progress_bar(
|
| 882 |
+
total=(bs + process_batch_size - 1) // process_batch_size
|
| 883 |
+
) as batch_progress_bar:
|
| 884 |
+
for batch_index in range(0, bs, process_batch_size):
|
| 885 |
+
batch_inference_latents = inference_latents[batch_index : batch_index + process_batch_size]
|
| 886 |
+
batch_inference_embeddings = inference_embeddings[
|
| 887 |
+
batch_index : batch_index + process_batch_size
|
| 888 |
+
]
|
| 889 |
+
|
| 890 |
+
self.scheduler.set_timesteps(
|
| 891 |
+
num_inference_steps, device, original_inference_steps=original_inference_steps
|
| 892 |
+
)
|
| 893 |
+
timesteps = self.scheduler.timesteps
|
| 894 |
+
|
| 895 |
+
current_bs = batch_inference_embeddings.shape[0]
|
| 896 |
+
w = torch.tensor(self.guidance_scale - 1).repeat(current_bs)
|
| 897 |
+
w_embedding = self.get_guidance_scale_embedding(
|
| 898 |
+
w, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 899 |
+
).to(device=device, dtype=latents_1.dtype)
|
| 900 |
+
|
| 901 |
+
# 10. Perform inference for current batch
|
| 902 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 903 |
+
for index, t in enumerate(timesteps):
|
| 904 |
+
batch_inference_latents = batch_inference_latents.to(batch_inference_embeddings.dtype)
|
| 905 |
+
|
| 906 |
+
# model prediction (v-prediction, eps, x)
|
| 907 |
+
model_pred = self.unet(
|
| 908 |
+
batch_inference_latents,
|
| 909 |
+
t,
|
| 910 |
+
timestep_cond=w_embedding,
|
| 911 |
+
encoder_hidden_states=batch_inference_embeddings,
|
| 912 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 913 |
+
return_dict=False,
|
| 914 |
+
)[0]
|
| 915 |
+
|
| 916 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 917 |
+
batch_inference_latents, denoised = self.scheduler.step(
|
| 918 |
+
model_pred, t, batch_inference_latents, **extra_step_kwargs, return_dict=False
|
| 919 |
+
)
|
| 920 |
+
if callback_on_step_end is not None:
|
| 921 |
+
callback_kwargs = {}
|
| 922 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 923 |
+
callback_kwargs[k] = locals()[k]
|
| 924 |
+
callback_outputs = callback_on_step_end(self, index, t, callback_kwargs)
|
| 925 |
+
|
| 926 |
+
batch_inference_latents = callback_outputs.pop("latents", batch_inference_latents)
|
| 927 |
+
batch_inference_embeddings = callback_outputs.pop(
|
| 928 |
+
"prompt_embeds", batch_inference_embeddings
|
| 929 |
+
)
|
| 930 |
+
w_embedding = callback_outputs.pop("w_embedding", w_embedding)
|
| 931 |
+
denoised = callback_outputs.pop("denoised", denoised)
|
| 932 |
+
|
| 933 |
+
# call the callback, if provided
|
| 934 |
+
if index == len(timesteps) - 1 or (
|
| 935 |
+
(index + 1) > num_warmup_steps and (index + 1) % self.scheduler.order == 0
|
| 936 |
+
):
|
| 937 |
+
progress_bar.update()
|
| 938 |
+
if callback is not None and index % callback_steps == 0:
|
| 939 |
+
step_idx = index // getattr(self.scheduler, "order", 1)
|
| 940 |
+
callback(step_idx, t, batch_inference_latents)
|
| 941 |
+
|
| 942 |
+
denoised = denoised.to(batch_inference_embeddings.dtype)
|
| 943 |
+
|
| 944 |
+
# Note: This is not supported because you would get black images in your latent walk if
|
| 945 |
+
# NSFW concept is detected
|
| 946 |
+
# if not output_type == "latent":
|
| 947 |
+
# image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 948 |
+
# image, has_nsfw_concept = self.run_safety_checker(image, device, inference_embeddings.dtype)
|
| 949 |
+
# else:
|
| 950 |
+
# image = denoised
|
| 951 |
+
# has_nsfw_concept = None
|
| 952 |
+
|
| 953 |
+
# if has_nsfw_concept is None:
|
| 954 |
+
# do_denormalize = [True] * image.shape[0]
|
| 955 |
+
# else:
|
| 956 |
+
# do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 957 |
+
|
| 958 |
+
image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 959 |
+
do_denormalize = [True] * image.shape[0]
|
| 960 |
+
has_nsfw_concept = None
|
| 961 |
+
|
| 962 |
+
image = self.image_processor.postprocess(
|
| 963 |
+
image, output_type=output_type, do_denormalize=do_denormalize
|
| 964 |
+
)
|
| 965 |
+
images.append(image)
|
| 966 |
+
|
| 967 |
+
batch_progress_bar.update()
|
| 968 |
+
|
| 969 |
+
prompt_embeds_1 = next_prompt_embeds
|
| 970 |
+
latents_1 = next_latents
|
| 971 |
+
|
| 972 |
+
prompt_progress_bar.update()
|
| 973 |
+
|
| 974 |
+
# 11. Determine what should be returned
|
| 975 |
+
if output_type == "pil":
|
| 976 |
+
images = [image for image_list in images for image in image_list]
|
| 977 |
+
elif output_type == "np":
|
| 978 |
+
images = np.concatenate(images)
|
| 979 |
+
elif output_type == "pt":
|
| 980 |
+
images = torch.cat(images)
|
| 981 |
+
else:
|
| 982 |
+
raise ValueError("`output_type` must be one of 'pil', 'np' or 'pt'.")
|
| 983 |
+
|
| 984 |
+
# Offload all models
|
| 985 |
+
self.maybe_free_model_hooks()
|
| 986 |
+
|
| 987 |
+
if not return_dict:
|
| 988 |
+
return (images, has_nsfw_concept)
|
| 989 |
+
|
| 990 |
+
return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
|
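# NOTE (added usage sketch, not part of the uploaded file): one way to exercise this interpolation
# pipeline; the checkpoint name and the `custom_pipeline` identifier are assumptions and may need adapting.
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "SimianLuo/LCM_Dreamshaper_v7",                     # assumed LCM-distilled checkpoint
#         custom_pipeline="latent_consistency_interpolate",   # assumed community pipeline id
#         torch_dtype=torch.float16,
#     ).to("cuda")
#
#     result = pipe(
#         prompt=["a photo of a cat", "a photo of a dog"],    # at least two prompts are required
#         num_inference_steps=4,
#         num_interpolation_steps=8,
#         process_batch_size=4,                               # lower this to reduce memory usage
#     )
#     images = result.images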
v0.27.0/latent_consistency_txt2img.py
ADDED
|
@@ -0,0 +1,726 @@
|
| 1 |
+
# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
|
| 16 |
+
# and https://github.com/hojonathanho/diffusion
|
| 17 |
+
|
| 18 |
+
import math
|
| 19 |
+
from dataclasses import dataclass
|
| 20 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import torch
|
| 24 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 25 |
+
|
| 26 |
+
from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
|
| 27 |
+
from diffusers.configuration_utils import register_to_config
|
| 28 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 29 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 30 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 31 |
+
from diffusers.utils import BaseOutput
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class LatentConsistencyModelPipeline(DiffusionPipeline):
|
| 38 |
+
_optional_components = ["scheduler"]
|
| 39 |
+
|
| 40 |
+
def __init__(
|
| 41 |
+
self,
|
| 42 |
+
vae: AutoencoderKL,
|
| 43 |
+
text_encoder: CLIPTextModel,
|
| 44 |
+
tokenizer: CLIPTokenizer,
|
| 45 |
+
unet: UNet2DConditionModel,
|
| 46 |
+
scheduler: "LCMScheduler",
|
| 47 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 48 |
+
feature_extractor: CLIPImageProcessor,
|
| 49 |
+
requires_safety_checker: bool = True,
|
| 50 |
+
):
|
| 51 |
+
super().__init__()
|
| 52 |
+
|
| 53 |
+
scheduler = (
|
| 54 |
+
scheduler
|
| 55 |
+
if scheduler is not None
|
| 56 |
+
else LCMScheduler(
|
| 57 |
+
beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
|
| 58 |
+
)
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
self.register_modules(
|
| 62 |
+
vae=vae,
|
| 63 |
+
text_encoder=text_encoder,
|
| 64 |
+
tokenizer=tokenizer,
|
| 65 |
+
unet=unet,
|
| 66 |
+
scheduler=scheduler,
|
| 67 |
+
safety_checker=safety_checker,
|
| 68 |
+
feature_extractor=feature_extractor,
|
| 69 |
+
)
|
| 70 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 71 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 72 |
+
|
| 73 |
+
def _encode_prompt(
|
| 74 |
+
self,
|
| 75 |
+
prompt,
|
| 76 |
+
device,
|
| 77 |
+
num_images_per_prompt,
|
| 78 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 79 |
+
):
|
| 80 |
+
r"""
|
| 81 |
+
Encodes the prompt into text encoder hidden states.
|
| 82 |
+
Args:
|
| 83 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 84 |
+
prompt to be encoded
|
| 85 |
+
device: (`torch.device`):
|
| 86 |
+
torch device
|
| 87 |
+
num_images_per_prompt (`int`):
|
| 88 |
+
number of images that should be generated per prompt
|
| 89 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 90 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 91 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 92 |
+
"""
|
| 93 |
+
|
| 94 |
+
if prompt is not None and isinstance(prompt, str):
|
| 95 |
+
pass
|
| 96 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 97 |
+
len(prompt)
|
| 98 |
+
else:
|
| 99 |
+
prompt_embeds.shape[0]
|
| 100 |
+
|
| 101 |
+
if prompt_embeds is None:
|
| 102 |
+
text_inputs = self.tokenizer(
|
| 103 |
+
prompt,
|
| 104 |
+
padding="max_length",
|
| 105 |
+
max_length=self.tokenizer.model_max_length,
|
| 106 |
+
truncation=True,
|
| 107 |
+
return_tensors="pt",
|
| 108 |
+
)
|
| 109 |
+
text_input_ids = text_inputs.input_ids
|
| 110 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 111 |
+
|
| 112 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 113 |
+
text_input_ids, untruncated_ids
|
| 114 |
+
):
|
| 115 |
+
removed_text = self.tokenizer.batch_decode(
|
| 116 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 117 |
+
)
|
| 118 |
+
logger.warning(
|
| 119 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 120 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 124 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 125 |
+
else:
|
| 126 |
+
attention_mask = None
|
| 127 |
+
|
| 128 |
+
prompt_embeds = self.text_encoder(
|
| 129 |
+
text_input_ids.to(device),
|
| 130 |
+
attention_mask=attention_mask,
|
| 131 |
+
)
|
| 132 |
+
prompt_embeds = prompt_embeds[0]
|
| 133 |
+
|
| 134 |
+
if self.text_encoder is not None:
|
| 135 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 136 |
+
elif self.unet is not None:
|
| 137 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 138 |
+
else:
|
| 139 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 140 |
+
|
| 141 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 142 |
+
|
| 143 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 144 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 145 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 146 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 147 |
+
|
| 148 |
+
# Don't need to get uncond prompt embedding because of LCM Guided Distillation
|
| 149 |
+
return prompt_embeds
|
| 150 |
+
|
| 151 |
+
def run_safety_checker(self, image, device, dtype):
|
| 152 |
+
if self.safety_checker is None:
|
| 153 |
+
has_nsfw_concept = None
|
| 154 |
+
else:
|
| 155 |
+
if torch.is_tensor(image):
|
| 156 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 157 |
+
else:
|
| 158 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 159 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 160 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 161 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 162 |
+
)
|
| 163 |
+
return image, has_nsfw_concept
|
| 164 |
+
|
| 165 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):
|
| 166 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 167 |
+
if latents is None:
|
| 168 |
+
latents = torch.randn(shape, dtype=dtype).to(device)
|
| 169 |
+
else:
|
| 170 |
+
latents = latents.to(device)
|
| 171 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 172 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 173 |
+
return latents
|
| 174 |
+
|
| 175 |
+
def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 176 |
+
"""
|
| 177 |
+
see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 178 |
+
Args:
|
| 179 |
+
w: torch.Tensor: generate embedding vectors for these guidance scale values
|
| 180 |
+
embedding_dim: int: dimension of the embeddings to generate
|
| 181 |
+
dtype: data type of the generated embeddings
|
| 182 |
+
Returns:
|
| 183 |
+
embedding vectors with shape `(len(w), embedding_dim)`
|
| 184 |
+
"""
|
| 185 |
+
assert len(w.shape) == 1
|
| 186 |
+
w = w * 1000.0
|
| 187 |
+
|
| 188 |
+
half_dim = embedding_dim // 2
|
| 189 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 190 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 191 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 192 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 193 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 194 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 195 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 196 |
+
return emb
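# NOTE (added comment, not part of the uploaded file): this mirrors get_guidance_scale_embedding in
# the interpolation pipeline above, but __call__ below feeds guidance_scale directly (not
# guidance_scale - 1) and uses a fixed embedding_dim of 256, which is assumed to match the LCM
# UNet's time_cond_proj_dim.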
|
| 197 |
+
|
| 198 |
+
@torch.no_grad()
|
| 199 |
+
def __call__(
|
| 200 |
+
self,
|
| 201 |
+
prompt: Union[str, List[str]] = None,
|
| 202 |
+
height: Optional[int] = 768,
|
| 203 |
+
width: Optional[int] = 768,
|
| 204 |
+
guidance_scale: float = 7.5,
|
| 205 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 206 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 207 |
+
num_inference_steps: int = 4,
|
| 208 |
+
lcm_origin_steps: int = 50,
|
| 209 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 210 |
+
output_type: Optional[str] = "pil",
|
| 211 |
+
return_dict: bool = True,
|
| 212 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 213 |
+
):
|
| 214 |
+
# 0. Default height and width to unet
|
| 215 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 216 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 217 |
+
|
| 218 |
+
# 2. Define call parameters
|
| 219 |
+
if prompt is not None and isinstance(prompt, str):
|
| 220 |
+
batch_size = 1
|
| 221 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 222 |
+
batch_size = len(prompt)
|
| 223 |
+
else:
|
| 224 |
+
batch_size = prompt_embeds.shape[0]
|
| 225 |
+
|
| 226 |
+
device = self._execution_device
|
| 227 |
+
# do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
|
| 228 |
+
|
| 229 |
+
# 3. Encode input prompt
|
| 230 |
+
prompt_embeds = self._encode_prompt(
|
| 231 |
+
prompt,
|
| 232 |
+
device,
|
| 233 |
+
num_images_per_prompt,
|
| 234 |
+
prompt_embeds=prompt_embeds,
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
# 4. Prepare timesteps
|
| 238 |
+
self.scheduler.set_timesteps(num_inference_steps, lcm_origin_steps)
|
| 239 |
+
timesteps = self.scheduler.timesteps
|
| 240 |
+
|
| 241 |
+
# 5. Prepare latent variable
|
| 242 |
+
num_channels_latents = self.unet.config.in_channels
|
| 243 |
+
latents = self.prepare_latents(
|
| 244 |
+
batch_size * num_images_per_prompt,
|
| 245 |
+
num_channels_latents,
|
| 246 |
+
height,
|
| 247 |
+
width,
|
| 248 |
+
prompt_embeds.dtype,
|
| 249 |
+
device,
|
| 250 |
+
latents,
|
| 251 |
+
)
|
| 252 |
+
bs = batch_size * num_images_per_prompt
|
| 253 |
+
|
| 254 |
+
# 6. Get Guidance Scale Embedding
|
| 255 |
+
w = torch.tensor(guidance_scale).repeat(bs)
|
| 256 |
+
w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
|
| 257 |
+
|
| 258 |
+
# 7. LCM MultiStep Sampling Loop:
|
| 259 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 260 |
+
for i, t in enumerate(timesteps):
|
| 261 |
+
ts = torch.full((bs,), t, device=device, dtype=torch.long)
|
| 262 |
+
latents = latents.to(prompt_embeds.dtype)
|
| 263 |
+
|
| 264 |
+
# model prediction (v-prediction, eps, x)
|
| 265 |
+
model_pred = self.unet(
|
| 266 |
+
latents,
|
| 267 |
+
ts,
|
| 268 |
+
timestep_cond=w_embedding,
|
| 269 |
+
encoder_hidden_states=prompt_embeds,
|
| 270 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 271 |
+
return_dict=False,
|
| 272 |
+
)[0]
|
| 273 |
+
|
| 274 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 275 |
+
latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
|
| 276 |
+
|
| 277 |
+
# # call the callback, if provided
|
| 278 |
+
# if i == len(timesteps) - 1:
|
| 279 |
+
progress_bar.update()
|
| 280 |
+
|
| 281 |
+
denoised = denoised.to(prompt_embeds.dtype)
|
| 282 |
+
if not output_type == "latent":
|
| 283 |
+
image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 284 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 285 |
+
else:
|
| 286 |
+
image = denoised
|
| 287 |
+
has_nsfw_concept = None
|
| 288 |
+
|
| 289 |
+
if has_nsfw_concept is None:
|
| 290 |
+
do_denormalize = [True] * image.shape[0]
|
| 291 |
+
else:
|
| 292 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 293 |
+
|
| 294 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 295 |
+
|
| 296 |
+
if not return_dict:
|
| 297 |
+
return (image, has_nsfw_concept)
|
| 298 |
+
|
| 299 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
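# NOTE (added usage sketch, not part of the uploaded file): assuming `pipe` is an instance of this
# class with LCM-distilled weights loaded, a minimal call looks like:
#
#     images = pipe(
#         prompt="a photo of an astronaut riding a horse",
#         num_inference_steps=4,    # LCM needs only a handful of steps
#         lcm_origin_steps=50,      # training-time schedule length, see LCMScheduler.set_timesteps
#         guidance_scale=8.0,
#     ).images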
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
@dataclass
|
| 303 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
|
| 304 |
+
class LCMSchedulerOutput(BaseOutput):
|
| 305 |
+
"""
|
| 306 |
+
Output class for the scheduler's `step` function output.
|
| 307 |
+
Args:
|
| 308 |
+
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 309 |
+
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
|
| 310 |
+
denoising loop.
|
| 311 |
+
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 312 |
+
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
|
| 313 |
+
`pred_original_sample` can be used to preview progress or for guidance.
|
| 314 |
+
"""
|
| 315 |
+
|
| 316 |
+
prev_sample: torch.FloatTensor
|
| 317 |
+
denoised: Optional[torch.FloatTensor] = None
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
|
| 321 |
+
def betas_for_alpha_bar(
|
| 322 |
+
num_diffusion_timesteps,
|
| 323 |
+
max_beta=0.999,
|
| 324 |
+
alpha_transform_type="cosine",
|
| 325 |
+
):
|
| 326 |
+
"""
|
| 327 |
+
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
| 328 |
+
(1-beta) over time from t = [0,1].
|
| 329 |
+
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
| 330 |
+
to that part of the diffusion process.
|
| 331 |
+
Args:
|
| 332 |
+
num_diffusion_timesteps (`int`): the number of betas to produce.
|
| 333 |
+
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
| 334 |
+
prevent singularities.
|
| 335 |
+
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
|
| 336 |
+
Choose from `cosine` or `exp`
|
| 337 |
+
Returns:
|
| 338 |
+
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
| 339 |
+
"""
|
| 340 |
+
if alpha_transform_type == "cosine":
|
| 341 |
+
|
| 342 |
+
def alpha_bar_fn(t):
|
| 343 |
+
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
|
| 344 |
+
|
| 345 |
+
elif alpha_transform_type == "exp":
|
| 346 |
+
|
| 347 |
+
def alpha_bar_fn(t):
|
| 348 |
+
return math.exp(t * -12.0)
|
| 349 |
+
|
| 350 |
+
else:
|
| 351 |
+
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
|
| 352 |
+
|
| 353 |
+
betas = []
|
| 354 |
+
for i in range(num_diffusion_timesteps):
|
| 355 |
+
t1 = i / num_diffusion_timesteps
|
| 356 |
+
t2 = (i + 1) / num_diffusion_timesteps
|
| 357 |
+
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
|
| 358 |
+
return torch.tensor(betas, dtype=torch.float32)
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def rescale_zero_terminal_snr(betas):
|
| 362 |
+
"""
|
| 363 |
+
Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
|
| 364 |
+
Args:
|
| 365 |
+
betas (`torch.FloatTensor`):
|
| 366 |
+
the betas that the scheduler is being initialized with.
|
| 367 |
+
Returns:
|
| 368 |
+
`torch.FloatTensor`: rescaled betas with zero terminal SNR
|
| 369 |
+
"""
|
| 370 |
+
# Convert betas to alphas_bar_sqrt
|
| 371 |
+
alphas = 1.0 - betas
|
| 372 |
+
alphas_cumprod = torch.cumprod(alphas, dim=0)
|
| 373 |
+
alphas_bar_sqrt = alphas_cumprod.sqrt()
|
| 374 |
+
|
| 375 |
+
# Store old values.
|
| 376 |
+
alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
|
| 377 |
+
alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
|
| 378 |
+
|
| 379 |
+
# Shift so the last timestep is zero.
|
| 380 |
+
alphas_bar_sqrt -= alphas_bar_sqrt_T
|
| 381 |
+
|
| 382 |
+
# Scale so the first timestep is back to the old value.
|
| 383 |
+
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
|
| 384 |
+
|
| 385 |
+
# Convert alphas_bar_sqrt to betas
|
| 386 |
+
alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
|
| 387 |
+
alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
|
| 388 |
+
alphas = torch.cat([alphas_bar[0:1], alphas])
|
| 389 |
+
betas = 1 - alphas
|
| 390 |
+
|
| 391 |
+
return betas
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
class LCMScheduler(SchedulerMixin, ConfigMixin):
|
| 395 |
+
"""
|
| 396 |
+
`LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
|
| 397 |
+
non-Markovian guidance.
|
| 398 |
+
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
|
| 399 |
+
methods the library implements for all schedulers such as loading and saving.
|
| 400 |
+
Args:
|
| 401 |
+
num_train_timesteps (`int`, defaults to 1000):
|
| 402 |
+
The number of diffusion steps to train the model.
|
| 403 |
+
beta_start (`float`, defaults to 0.0001):
|
| 404 |
+
The starting `beta` value of inference.
|
| 405 |
+
beta_end (`float`, defaults to 0.02):
|
| 406 |
+
The final `beta` value.
|
| 407 |
+
beta_schedule (`str`, defaults to `"linear"`):
|
| 408 |
+
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
| 409 |
+
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
| 410 |
+
trained_betas (`np.ndarray`, *optional*):
|
| 411 |
+
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
|
| 412 |
+
clip_sample (`bool`, defaults to `True`):
|
| 413 |
+
Clip the predicted sample for numerical stability.
|
| 414 |
+
clip_sample_range (`float`, defaults to 1.0):
|
| 415 |
+
The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
|
| 416 |
+
set_alpha_to_one (`bool`, defaults to `True`):
|
| 417 |
+
Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
|
| 418 |
+
there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
|
| 419 |
+
otherwise it uses the alpha value at step 0.
|
| 420 |
+
steps_offset (`int`, defaults to 0):
|
| 421 |
+
An offset added to the inference steps, as required by some model families.
|
| 422 |
+
prediction_type (`str`, defaults to `epsilon`, *optional*):
|
| 423 |
+
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
|
| 424 |
+
`sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
|
| 425 |
+
Video](https://imagen.research.google/video/paper.pdf) paper).
|
| 426 |
+
thresholding (`bool`, defaults to `False`):
|
| 427 |
+
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
|
| 428 |
+
as Stable Diffusion.
|
| 429 |
+
dynamic_thresholding_ratio (`float`, defaults to 0.995):
|
| 430 |
+
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
|
| 431 |
+
sample_max_value (`float`, defaults to 1.0):
|
| 432 |
+
The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
|
| 433 |
+
timestep_spacing (`str`, defaults to `"leading"`):
|
| 434 |
+
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
|
| 435 |
+
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
|
| 436 |
+
rescale_betas_zero_snr (`bool`, defaults to `False`):
|
| 437 |
+
Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
|
| 438 |
+
dark samples instead of limiting it to samples with medium brightness. Loosely related to
|
| 439 |
+
[`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
|
| 440 |
+
"""
|
| 441 |
+
|
| 442 |
+
# _compatibles = [e.name for e in KarrasDiffusionSchedulers]
|
| 443 |
+
order = 1
|
| 444 |
+
|
| 445 |
+
@register_to_config
|
| 446 |
+
def __init__(
|
| 447 |
+
self,
|
| 448 |
+
num_train_timesteps: int = 1000,
|
| 449 |
+
beta_start: float = 0.0001,
|
| 450 |
+
beta_end: float = 0.02,
|
| 451 |
+
beta_schedule: str = "linear",
|
| 452 |
+
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
| 453 |
+
clip_sample: bool = True,
|
| 454 |
+
set_alpha_to_one: bool = True,
|
| 455 |
+
steps_offset: int = 0,
|
| 456 |
+
prediction_type: str = "epsilon",
|
| 457 |
+
thresholding: bool = False,
|
| 458 |
+
dynamic_thresholding_ratio: float = 0.995,
|
| 459 |
+
clip_sample_range: float = 1.0,
|
| 460 |
+
sample_max_value: float = 1.0,
|
| 461 |
+
timestep_spacing: str = "leading",
|
| 462 |
+
rescale_betas_zero_snr: bool = False,
|
| 463 |
+
):
|
| 464 |
+
if trained_betas is not None:
|
| 465 |
+
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
|
| 466 |
+
elif beta_schedule == "linear":
|
| 467 |
+
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
|
| 468 |
+
elif beta_schedule == "scaled_linear":
|
| 469 |
+
# this schedule is very specific to the latent diffusion model.
|
| 470 |
+
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
|
| 471 |
+
elif beta_schedule == "squaredcos_cap_v2":
|
| 472 |
+
# Glide cosine schedule
|
| 473 |
+
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
| 474 |
+
else:
|
| 475 |
+
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
|
| 476 |
+
|
| 477 |
+
# Rescale for zero SNR
|
| 478 |
+
if rescale_betas_zero_snr:
|
| 479 |
+
self.betas = rescale_zero_terminal_snr(self.betas)
|
| 480 |
+
|
| 481 |
+
self.alphas = 1.0 - self.betas
|
| 482 |
+
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
|
| 483 |
+
|
| 484 |
+
# At every step in ddim, we are looking into the previous alphas_cumprod
|
| 485 |
+
# For the final step, there is no previous alphas_cumprod because we are already at 0
|
| 486 |
+
# `set_alpha_to_one` decides whether we set this parameter simply to one or
|
| 487 |
+
# whether we use the final alpha of the "non-previous" one.
|
| 488 |
+
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
|
| 489 |
+
|
| 490 |
+
# standard deviation of the initial noise distribution
|
| 491 |
+
self.init_noise_sigma = 1.0
|
| 492 |
+
|
| 493 |
+
# setable values
|
| 494 |
+
self.num_inference_steps = None
|
| 495 |
+
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
|
| 496 |
+
|
| 497 |
+
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
|
| 498 |
+
"""
|
| 499 |
+
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
| 500 |
+
current timestep.
|
| 501 |
+
Args:
|
| 502 |
+
sample (`torch.FloatTensor`):
|
| 503 |
+
The input sample.
|
| 504 |
+
timestep (`int`, *optional*):
|
| 505 |
+
The current timestep in the diffusion chain.
|
| 506 |
+
Returns:
|
| 507 |
+
`torch.FloatTensor`:
|
| 508 |
+
A scaled input sample.
|
| 509 |
+
"""
|
| 510 |
+
return sample
|
| 511 |
+
|
| 512 |
+
def _get_variance(self, timestep, prev_timestep):
|
| 513 |
+
alpha_prod_t = self.alphas_cumprod[timestep]
|
| 514 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
|
| 515 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 516 |
+
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 517 |
+
|
| 518 |
+
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
|
| 519 |
+
|
| 520 |
+
return variance
|
| 521 |
+
|
| 522 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
|
| 523 |
+
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
|
| 524 |
+
"""
|
| 525 |
+
"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
|
| 526 |
+
prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
|
| 527 |
+
s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
|
| 528 |
+
pixels from saturation at each step. We find that dynamic thresholding results in significantly better
|
| 529 |
+
photorealism as well as better image-text alignment, especially when using very large guidance weights."
|
| 530 |
+
https://arxiv.org/abs/2205.11487
|
| 531 |
+
"""
|
| 532 |
+
dtype = sample.dtype
|
| 533 |
+
batch_size, channels, height, width = sample.shape
|
| 534 |
+
|
| 535 |
+
if dtype not in (torch.float32, torch.float64):
|
| 536 |
+
sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
|
| 537 |
+
|
| 538 |
+
# Flatten sample for doing quantile calculation along each image
|
| 539 |
+
sample = sample.reshape(batch_size, channels * height * width)
|
| 540 |
+
|
| 541 |
+
abs_sample = sample.abs() # "a certain percentile absolute pixel value"
|
| 542 |
+
|
| 543 |
+
s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
|
| 544 |
+
s = torch.clamp(
|
| 545 |
+
s, min=1, max=self.config.sample_max_value
|
| 546 |
+
) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
|
| 547 |
+
|
| 548 |
+
s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
|
| 549 |
+
sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
|
| 550 |
+
|
| 551 |
+
sample = sample.reshape(batch_size, channels, height, width)
|
| 552 |
+
sample = sample.to(dtype)
|
| 553 |
+
|
| 554 |
+
return sample
|
| 555 |
+
|
| 556 |
+
def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):
|
| 557 |
+
"""
|
| 558 |
+
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
|
| 559 |
+
Args:
|
| 560 |
+
num_inference_steps (`int`):
|
| 561 |
+
The number of diffusion steps used when generating samples with a pre-trained model.
|
| 562 |
+
"""
|
| 563 |
+
|
| 564 |
+
if num_inference_steps > self.config.num_train_timesteps:
|
| 565 |
+
raise ValueError(
|
| 566 |
+
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
|
| 567 |
+
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
|
| 568 |
+
f" maximal {self.config.num_train_timesteps} timesteps."
|
| 569 |
+
)
|
| 570 |
+
|
| 571 |
+
self.num_inference_steps = num_inference_steps
|
| 572 |
+
|
| 573 |
+
# LCM Timesteps Setting: # Linear Spacing
|
| 574 |
+
c = self.config.num_train_timesteps // lcm_origin_steps
|
| 575 |
+
lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule
|
| 576 |
+
skipping_step = len(lcm_origin_timesteps) // num_inference_steps
|
| 577 |
+
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
|
| 578 |
+
|
| 579 |
+
self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
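# NOTE (added worked example, not part of the uploaded file): with the defaults
# num_train_timesteps=1000, lcm_origin_steps=50 and num_inference_steps=4 this gives c = 20,
# lcm_origin_timesteps = [19, 39, ..., 999], skipping_step = 50 // 4 = 12, and an inference
# schedule of [999, 759, 519, 279].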
|
| 580 |
+
|
| 581 |
+
def get_scalings_for_boundary_condition_discrete(self, t):
|
| 582 |
+
self.sigma_data = 0.5 # Default: 0.5
|
| 583 |
+
|
| 584 |
+
# By dividing 0.1: This is almost a delta function at t=0.
|
| 585 |
+
c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
|
| 586 |
+
c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
|
| 587 |
+
return c_skip, c_out
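# Illustrative sketch (standalone, not part of the scheduler class above): the boundary
# scalings satisfy c_skip -> 1 and c_out -> 0 as t -> 0, so `denoised` reduces to the input
# sample at the boundary. sigma_data = 0.5 as in the method above.
def _boundary_scalings_sketch():
    sigma_data = 0.5
    for t in (0, 10, 500, 999):
        scaled_t = t / 0.1
        c_skip = sigma_data**2 / (scaled_t**2 + sigma_data**2)
        c_out = scaled_t / (scaled_t**2 + sigma_data**2) ** 0.5
        print(f"t={t:4d}  c_skip={c_skip:.6f}  c_out={c_out:.6f}")  # t=0 gives (1.0, 0.0)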
|
| 588 |
+
|
| 589 |
+
def step(
|
| 590 |
+
self,
|
| 591 |
+
model_output: torch.FloatTensor,
|
| 592 |
+
timeindex: int,
|
| 593 |
+
timestep: int,
|
| 594 |
+
sample: torch.FloatTensor,
|
| 595 |
+
eta: float = 0.0,
|
| 596 |
+
use_clipped_model_output: bool = False,
|
| 597 |
+
generator=None,
|
| 598 |
+
variance_noise: Optional[torch.FloatTensor] = None,
|
| 599 |
+
return_dict: bool = True,
|
| 600 |
+
) -> Union[LCMSchedulerOutput, Tuple]:
|
| 601 |
+
"""
|
| 602 |
+
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
|
| 603 |
+
process from the learned model outputs (most often the predicted noise).
|
| 604 |
+
Args:
|
| 605 |
+
model_output (`torch.FloatTensor`):
|
| 606 |
+
The direct output from learned diffusion model.
|
| 607 |
+
timeindex (`int`):
The index of the current timestep in the `self.timesteps` schedule.
timestep (`int`):
|
| 608 |
+
The current discrete timestep in the diffusion chain.
|
| 609 |
+
sample (`torch.FloatTensor`):
|
| 610 |
+
A current instance of a sample created by the diffusion process.
|
| 611 |
+
eta (`float`):
|
| 612 |
+
The weight of noise for added noise in diffusion step.
|
| 613 |
+
use_clipped_model_output (`bool`, defaults to `False`):
|
| 614 |
+
If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
|
| 615 |
+
because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
|
| 616 |
+
clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
|
| 617 |
+
`use_clipped_model_output` has no effect.
|
| 618 |
+
generator (`torch.Generator`, *optional*):
|
| 619 |
+
A random number generator.
|
| 620 |
+
variance_noise (`torch.FloatTensor`):
|
| 621 |
+
Alternative to generating noise with `generator` by directly providing the noise for the variance
|
| 622 |
+
itself. Useful for methods such as [`CycleDiffusion`].
|
| 623 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 624 |
+
Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
|
| 625 |
+
Returns:
|
| 626 |
+
[`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
|
| 627 |
+
If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
|
| 628 |
+
tuple is returned where the first element is the sample tensor.
|
| 629 |
+
"""
|
| 630 |
+
if self.num_inference_steps is None:
|
| 631 |
+
raise ValueError(
|
| 632 |
+
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
| 633 |
+
)
|
| 634 |
+
|
| 635 |
+
# 1. get previous step value
|
| 636 |
+
prev_timeindex = timeindex + 1
|
| 637 |
+
if prev_timeindex < len(self.timesteps):
|
| 638 |
+
prev_timestep = self.timesteps[prev_timeindex]
|
| 639 |
+
else:
|
| 640 |
+
prev_timestep = timestep
|
| 641 |
+
|
| 642 |
+
# 2. compute alphas, betas
|
| 643 |
+
alpha_prod_t = self.alphas_cumprod[timestep]
|
| 644 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
|
| 645 |
+
|
| 646 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 647 |
+
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 648 |
+
|
| 649 |
+
# 3. Get scalings for boundary conditions
|
| 650 |
+
c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
|
| 651 |
+
|
| 652 |
+
# 4. Different Parameterization:
|
| 653 |
+
parameterization = self.config.prediction_type
|
| 654 |
+
|
| 655 |
+
if parameterization == "epsilon": # noise-prediction
|
| 656 |
+
pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
|
| 657 |
+
|
| 658 |
+
elif parameterization == "sample": # x-prediction
|
| 659 |
+
pred_x0 = model_output
|
| 660 |
+
|
| 661 |
+
elif parameterization == "v_prediction": # v-prediction
|
| 662 |
+
pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
|
| 663 |
+
|
| 664 |
+
# 5. Denoise model output using boundary conditions
|
| 665 |
+
denoised = c_out * pred_x0 + c_skip * sample
|
| 666 |
+
|
| 667 |
+
# 6. Sample z ~ N(0, I), For MultiStep Inference
|
| 668 |
+
# Noise is not used for one-step sampling.
|
| 669 |
+
if len(self.timesteps) > 1:
|
| 670 |
+
noise = torch.randn(model_output.shape).to(model_output.device)
|
| 671 |
+
prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
|
| 672 |
+
else:
|
| 673 |
+
prev_sample = denoised
|
| 674 |
+
|
| 675 |
+
if not return_dict:
|
| 676 |
+
return (prev_sample, denoised)
|
| 677 |
+
|
| 678 |
+
return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
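# Illustrative sketch (standalone, not part of the scheduler class above): how `set_timesteps`
# and `step` are meant to be driven in a multi-step LCM loop. `scheduler`, `unet`,
# `prompt_embeds`, and `w_embedding` are assumed to be provided by the caller, and the exact
# UNet call signature below is an assumption for illustration only.
def _lcm_sampling_loop_sketch(scheduler, unet, latents, prompt_embeds, w_embedding):
    scheduler.set_timesteps(num_inference_steps=4, lcm_origin_steps=50, device=latents.device)
    denoised = None
    for i, t in enumerate(scheduler.timesteps):
        model_output = unet(
            latents, t, timestep_cond=w_embedding, encoder_hidden_states=prompt_embeds
        ).sample
        # `step` returns (prev_sample, denoised) when return_dict=False
        latents, denoised = scheduler.step(model_output, i, t, latents, return_dict=False)
    return denoised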
|
| 679 |
+
|
| 680 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
|
| 681 |
+
def add_noise(
|
| 682 |
+
self,
|
| 683 |
+
original_samples: torch.FloatTensor,
|
| 684 |
+
noise: torch.FloatTensor,
|
| 685 |
+
timesteps: torch.IntTensor,
|
| 686 |
+
) -> torch.FloatTensor:
|
| 687 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
|
| 688 |
+
alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
|
| 689 |
+
timesteps = timesteps.to(original_samples.device)
|
| 690 |
+
|
| 691 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 692 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 693 |
+
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
| 694 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 695 |
+
|
| 696 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 697 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 698 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
| 699 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 700 |
+
|
| 701 |
+
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
| 702 |
+
return noisy_samples
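# Illustrative sketch (standalone, not part of the scheduler class above): the forward
# noising rule implemented by `add_noise`, evaluated with a toy cumulative-alpha schedule
# (the real schedule comes from the scheduler's betas).
def _add_noise_sketch():
    import torch

    alphas_cumprod = torch.linspace(0.9999, 0.005, 1000)    # toy schedule for illustration
    x0 = torch.randn(1, 4, 8, 8)
    noise = torch.randn_like(x0)
    t = torch.tensor([500])
    sqrt_ab = (alphas_cumprod[t] ** 0.5).view(-1, 1, 1, 1)
    sqrt_one_minus_ab = ((1 - alphas_cumprod[t]) ** 0.5).view(-1, 1, 1, 1)
    return sqrt_ab * x0 + sqrt_one_minus_ab * noise         # x_t = sqrt(a_bar)*x0 + sqrt(1-a_bar)*eps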
|
| 703 |
+
|
| 704 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
|
| 705 |
+
def get_velocity(
|
| 706 |
+
self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
|
| 707 |
+
) -> torch.FloatTensor:
|
| 708 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as sample
|
| 709 |
+
alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
|
| 710 |
+
timesteps = timesteps.to(sample.device)
|
| 711 |
+
|
| 712 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 713 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 714 |
+
while len(sqrt_alpha_prod.shape) < len(sample.shape):
|
| 715 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 716 |
+
|
| 717 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 718 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 719 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
|
| 720 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 721 |
+
|
| 722 |
+
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
|
| 723 |
+
return velocity
|
| 724 |
+
|
| 725 |
+
def __len__(self):
|
| 726 |
+
return self.config.num_train_timesteps
|
v0.27.0/llm_grounded_diffusion.py
ADDED
|
@@ -0,0 +1,1558 @@
| 1 |
+
# Copyright 2024 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# This is a single file implementation of LMD+. See README.md for examples.
|
| 16 |
+
|
| 17 |
+
import ast
|
| 18 |
+
import gc
|
| 19 |
+
import inspect
|
| 20 |
+
import math
|
| 21 |
+
import warnings
|
| 22 |
+
from collections.abc import Iterable
|
| 23 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 24 |
+
|
| 25 |
+
import torch
|
| 26 |
+
import torch.nn.functional as F
|
| 27 |
+
from packaging import version
|
| 28 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 29 |
+
|
| 30 |
+
from diffusers.configuration_utils import FrozenDict
|
| 31 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 32 |
+
from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 33 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 34 |
+
from diffusers.models.attention import Attention, GatedSelfAttentionDense
|
| 35 |
+
from diffusers.models.attention_processor import AttnProcessor2_0
|
| 36 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 37 |
+
from diffusers.pipelines import DiffusionPipeline
|
| 38 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 39 |
+
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
|
| 40 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 41 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 42 |
+
from diffusers.utils import (
|
| 43 |
+
USE_PEFT_BACKEND,
|
| 44 |
+
deprecate,
|
| 45 |
+
logging,
|
| 46 |
+
replace_example_docstring,
|
| 47 |
+
scale_lora_layers,
|
| 48 |
+
unscale_lora_layers,
|
| 49 |
+
)
|
| 50 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
EXAMPLE_DOC_STRING = """
|
| 54 |
+
Examples:
|
| 55 |
+
```py
|
| 56 |
+
>>> import torch
|
| 57 |
+
>>> from diffusers import DiffusionPipeline
|
| 58 |
+
|
| 59 |
+
>>> pipe = DiffusionPipeline.from_pretrained(
|
| 60 |
+
... "longlian/lmd_plus",
|
| 61 |
+
... custom_pipeline="llm_grounded_diffusion",
|
| 62 |
+
... custom_revision="main",
|
| 63 |
+
... variant="fp16", torch_dtype=torch.float16
|
| 64 |
+
... )
|
| 65 |
+
>>> pipe.enable_model_cpu_offload()
|
| 66 |
+
|
| 67 |
+
>>> # Generate an image described by the prompt and
|
| 68 |
+
>>> # insert objects described by text at the region defined by bounding boxes
|
| 69 |
+
>>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
|
| 70 |
+
>>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]]
|
| 71 |
+
>>> phrases = ["a waterfall", "a modern high speed train"]
|
| 72 |
+
|
| 73 |
+
>>> images = pipe(
|
| 74 |
+
... prompt=prompt,
|
| 75 |
+
... phrases=phrases,
|
| 76 |
+
... boxes=boxes,
|
| 77 |
+
... gligen_scheduled_sampling_beta=0.4,
|
| 78 |
+
... output_type="pil",
|
| 79 |
+
... num_inference_steps=50,
|
| 80 |
+
... lmd_guidance_kwargs={}
|
| 81 |
+
... ).images
|
| 82 |
+
|
| 83 |
+
>>> images[0].save("./lmd_plus_generation.jpg")
|
| 84 |
+
|
| 85 |
+
>>> # Generate directly from a text prompt and an LLM response
|
| 86 |
+
>>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
|
| 87 |
+
>>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\"""
|
| 88 |
+
[('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]
|
| 89 |
+
Background prompt: A beautiful forest with fall foliage
|
| 90 |
+
Negative prompt:
|
| 91 |
+
\""")
|
| 92 |
+
|
| 93 |
+
>>> images = pipe(
|
| 94 |
+
... prompt=prompt,
|
| 95 |
+
... negative_prompt=neg_prompt,
|
| 96 |
+
... phrases=phrases,
|
| 97 |
+
... boxes=boxes,
|
| 98 |
+
... gligen_scheduled_sampling_beta=0.4,
|
| 99 |
+
... output_type="pil",
|
| 100 |
+
... num_inference_steps=50,
|
| 101 |
+
... lmd_guidance_kwargs={}
|
| 102 |
+
... ).images
|
| 103 |
+
|
| 104 |
+
>>> images[0].save("./lmd_plus_generation.jpg")
|
| 105 |
+
|
| 106 |
+
images[0]
|
| 107 |
+
|
| 108 |
+
```
|
| 109 |
+
"""
|
| 110 |
+
|
| 111 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 112 |
+
|
| 113 |
+
# All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)]
|
| 114 |
+
# Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`.
|
| 115 |
+
DEFAULT_GUIDANCE_ATTN_KEYS = [
|
| 116 |
+
("mid", 0, 0, 0),
|
| 117 |
+
("up", 1, 0, 0),
|
| 118 |
+
("up", 1, 1, 0),
|
| 119 |
+
("up", 1, 2, 0),
|
| 120 |
+
]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def convert_attn_keys(key):
|
| 124 |
+
"""Convert the attention key from tuple format to the torch state format"""
|
| 125 |
+
|
| 126 |
+
if key[0] == "mid":
|
| 127 |
+
assert key[1] == 0, f"mid block only has one block but the index is {key[1]}"
|
| 128 |
+
return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
|
| 129 |
+
|
| 130 |
+
return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS]
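# After the conversion above, DEFAULT_GUIDANCE_ATTN_KEYS holds processor names in the torch
# state format, i.e.:
#   "mid_block.attentions.0.transformer_blocks.0.attn2.processor"
#   "up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor"
#   "up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor"
#   "up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor"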
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def scale_proportion(obj_box, H, W):
|
| 137 |
+
# Separately rounding box_w and box_h to allow shift-invariant box sizes. Otherwise box sizes may change when both coordinates to be rounded end with ".5".
|
| 138 |
+
x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H)
|
| 139 |
+
box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H)
|
| 140 |
+
x_max, y_max = x_min + box_w, y_min + box_h
|
| 141 |
+
|
| 142 |
+
x_min, y_min = max(x_min, 0), max(y_min, 0)
|
| 143 |
+
x_max, y_max = min(x_max, W), min(y_max, H)
|
| 144 |
+
|
| 145 |
+
return x_min, y_min, x_max, y_max
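# Illustrative sketch (standalone, not part of the pipeline file): mapping a normalized box
# onto a 64x64 latent grid with `scale_proportion`, as used when building the guidance masks.
def _scale_proportion_sketch():
    box = [0.1387, 0.2051, 0.4277, 0.7090]       # [x_min, y_min, x_max, y_max] in [0, 1]
    return scale_proportion(box, H=64, W=64)      # -> (9, 13, 27, 45)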
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
# Adapted from the parent class `AttnProcessor2_0`
|
| 149 |
+
class AttnProcessorWithHook(AttnProcessor2_0):
|
| 150 |
+
def __init__(
|
| 151 |
+
self,
|
| 152 |
+
attn_processor_key,
|
| 153 |
+
hidden_size,
|
| 154 |
+
cross_attention_dim,
|
| 155 |
+
hook=None,
|
| 156 |
+
fast_attn=True,
|
| 157 |
+
enabled=True,
|
| 158 |
+
):
|
| 159 |
+
super().__init__()
|
| 160 |
+
self.attn_processor_key = attn_processor_key
|
| 161 |
+
self.hidden_size = hidden_size
|
| 162 |
+
self.cross_attention_dim = cross_attention_dim
|
| 163 |
+
self.hook = hook
|
| 164 |
+
self.fast_attn = fast_attn
|
| 165 |
+
self.enabled = enabled
|
| 166 |
+
|
| 167 |
+
def __call__(
|
| 168 |
+
self,
|
| 169 |
+
attn: Attention,
|
| 170 |
+
hidden_states,
|
| 171 |
+
encoder_hidden_states=None,
|
| 172 |
+
attention_mask=None,
|
| 173 |
+
temb=None,
|
| 174 |
+
scale: float = 1.0,
|
| 175 |
+
):
|
| 176 |
+
residual = hidden_states
|
| 177 |
+
|
| 178 |
+
if attn.spatial_norm is not None:
|
| 179 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 180 |
+
|
| 181 |
+
input_ndim = hidden_states.ndim
|
| 182 |
+
|
| 183 |
+
if input_ndim == 4:
|
| 184 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 185 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 186 |
+
|
| 187 |
+
batch_size, sequence_length, _ = (
|
| 188 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
if attention_mask is not None:
|
| 192 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 193 |
+
|
| 194 |
+
if attn.group_norm is not None:
|
| 195 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 196 |
+
|
| 197 |
+
args = () if USE_PEFT_BACKEND else (scale,)
|
| 198 |
+
query = attn.to_q(hidden_states, *args)
|
| 199 |
+
|
| 200 |
+
if encoder_hidden_states is None:
|
| 201 |
+
encoder_hidden_states = hidden_states
|
| 202 |
+
elif attn.norm_cross:
|
| 203 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 204 |
+
|
| 205 |
+
key = attn.to_k(encoder_hidden_states, *args)
|
| 206 |
+
value = attn.to_v(encoder_hidden_states, *args)
|
| 207 |
+
|
| 208 |
+
inner_dim = key.shape[-1]
|
| 209 |
+
head_dim = inner_dim // attn.heads
|
| 210 |
+
|
| 211 |
+
if (self.hook is not None and self.enabled) or not self.fast_attn:
|
| 212 |
+
query_batch_dim = attn.head_to_batch_dim(query)
|
| 213 |
+
key_batch_dim = attn.head_to_batch_dim(key)
|
| 214 |
+
value_batch_dim = attn.head_to_batch_dim(value)
|
| 215 |
+
attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask)
|
| 216 |
+
|
| 217 |
+
if self.hook is not None and self.enabled:
|
| 218 |
+
# Call the hook with query, key, value, and attention maps
|
| 219 |
+
self.hook(
|
| 220 |
+
self.attn_processor_key,
|
| 221 |
+
query_batch_dim,
|
| 222 |
+
key_batch_dim,
|
| 223 |
+
value_batch_dim,
|
| 224 |
+
attention_probs,
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
if self.fast_attn:
|
| 228 |
+
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 229 |
+
|
| 230 |
+
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 231 |
+
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 232 |
+
|
| 233 |
+
if attention_mask is not None:
|
| 234 |
+
# scaled_dot_product_attention expects attention_mask shape to be
|
| 235 |
+
# (batch, heads, source_length, target_length)
|
| 236 |
+
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
|
| 237 |
+
|
| 238 |
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
| 239 |
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
| 240 |
+
hidden_states = F.scaled_dot_product_attention(
|
| 241 |
+
query,
|
| 242 |
+
key,
|
| 243 |
+
value,
|
| 244 |
+
attn_mask=attention_mask,
|
| 245 |
+
dropout_p=0.0,
|
| 246 |
+
is_causal=False,
|
| 247 |
+
)
|
| 248 |
+
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
| 249 |
+
hidden_states = hidden_states.to(query.dtype)
|
| 250 |
+
else:
|
| 251 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 252 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 253 |
+
|
| 254 |
+
# linear proj
|
| 255 |
+
hidden_states = attn.to_out[0](hidden_states, *args)
|
| 256 |
+
# dropout
|
| 257 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 258 |
+
|
| 259 |
+
if input_ndim == 4:
|
| 260 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 261 |
+
|
| 262 |
+
if attn.residual_connection:
|
| 263 |
+
hidden_states = hidden_states + residual
|
| 264 |
+
|
| 265 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 266 |
+
|
| 267 |
+
return hidden_states
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class LLMGroundedDiffusionPipeline(
|
| 271 |
+
DiffusionPipeline,
|
| 272 |
+
StableDiffusionMixin,
|
| 273 |
+
TextualInversionLoaderMixin,
|
| 274 |
+
LoraLoaderMixin,
|
| 275 |
+
IPAdapterMixin,
|
| 276 |
+
FromSingleFileMixin,
|
| 277 |
+
):
|
| 278 |
+
r"""
|
| 279 |
+
Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://arxiv.org/pdf/2305.13655.pdf.
|
| 280 |
+
|
| 281 |
+
This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods
|
| 282 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 283 |
+
|
| 284 |
+
This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control.
|
| 285 |
+
|
| 286 |
+
Args:
|
| 287 |
+
vae ([`AutoencoderKL`]):
|
| 288 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 289 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 290 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 291 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 292 |
+
A `CLIPTokenizer` to tokenize text.
|
| 293 |
+
unet ([`UNet2DConditionModel`]):
|
| 294 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 295 |
+
scheduler ([`SchedulerMixin`]):
|
| 296 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 297 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 298 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 299 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 300 |
+
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
| 301 |
+
about a model's potential harms.
|
| 302 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 303 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 304 |
+
requires_safety_checker (bool):
|
| 305 |
+
Whether a safety checker is needed for this pipeline.
|
| 306 |
+
"""
|
| 307 |
+
|
| 308 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 309 |
+
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
|
| 310 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 311 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 312 |
+
|
| 313 |
+
objects_text = "Objects: "
|
| 314 |
+
bg_prompt_text = "Background prompt: "
|
| 315 |
+
bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip()
|
| 316 |
+
neg_prompt_text = "Negative prompt: "
|
| 317 |
+
neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip()
|
| 318 |
+
|
| 319 |
+
def __init__(
|
| 320 |
+
self,
|
| 321 |
+
vae: AutoencoderKL,
|
| 322 |
+
text_encoder: CLIPTextModel,
|
| 323 |
+
tokenizer: CLIPTokenizer,
|
| 324 |
+
unet: UNet2DConditionModel,
|
| 325 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 326 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 327 |
+
feature_extractor: CLIPImageProcessor,
|
| 328 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 329 |
+
requires_safety_checker: bool = True,
|
| 330 |
+
):
|
| 331 |
+
# This is copied from StableDiffusionPipeline, with hook initializations for LMD+.
|
| 332 |
+
super().__init__()
|
| 333 |
+
|
| 334 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 335 |
+
deprecation_message = (
|
| 336 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 337 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 338 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 339 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 340 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 341 |
+
" file"
|
| 342 |
+
)
|
| 343 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 344 |
+
new_config = dict(scheduler.config)
|
| 345 |
+
new_config["steps_offset"] = 1
|
| 346 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 347 |
+
|
| 348 |
+
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
| 349 |
+
deprecation_message = (
|
| 350 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 351 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 352 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 353 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 354 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 355 |
+
)
|
| 356 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 357 |
+
new_config = dict(scheduler.config)
|
| 358 |
+
new_config["clip_sample"] = False
|
| 359 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 360 |
+
|
| 361 |
+
if safety_checker is None and requires_safety_checker:
|
| 362 |
+
logger.warning(
|
| 363 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 364 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 365 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 366 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 367 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 368 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 369 |
+
)
|
| 370 |
+
|
| 371 |
+
if safety_checker is not None and feature_extractor is None:
|
| 372 |
+
raise ValueError(
|
| 373 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 374 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 375 |
+
)
|
| 376 |
+
|
| 377 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 378 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 379 |
+
) < version.parse("0.9.0.dev0")
|
| 380 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 381 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 382 |
+
deprecation_message = (
|
| 383 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 384 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 385 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 386 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 387 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 388 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 389 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 390 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 391 |
+
" the `unet/config.json` file"
|
| 392 |
+
)
|
| 393 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 394 |
+
new_config = dict(unet.config)
|
| 395 |
+
new_config["sample_size"] = 64
|
| 396 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 397 |
+
|
| 398 |
+
self.register_modules(
|
| 399 |
+
vae=vae,
|
| 400 |
+
text_encoder=text_encoder,
|
| 401 |
+
tokenizer=tokenizer,
|
| 402 |
+
unet=unet,
|
| 403 |
+
scheduler=scheduler,
|
| 404 |
+
safety_checker=safety_checker,
|
| 405 |
+
feature_extractor=feature_extractor,
|
| 406 |
+
image_encoder=image_encoder,
|
| 407 |
+
)
|
| 408 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 409 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 410 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 411 |
+
|
| 412 |
+
# Initialize the attention hooks for LLM-grounded Diffusion
|
| 413 |
+
self.register_attn_hooks(unet)
|
| 414 |
+
self._saved_attn = None
|
| 415 |
+
|
| 416 |
+
def attn_hook(self, name, query, key, value, attention_probs):
|
| 417 |
+
if name in DEFAULT_GUIDANCE_ATTN_KEYS:
|
| 418 |
+
self._saved_attn[name] = attention_probs
|
| 419 |
+
|
| 420 |
+
@classmethod
|
| 421 |
+
def convert_box(cls, box, height, width):
|
| 422 |
+
# box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max
|
| 423 |
+
x_min, y_min = box[0] / width, box[1] / height
|
| 424 |
+
w_box, h_box = box[2] / width, box[3] / height
|
| 425 |
+
|
| 426 |
+
x_max, y_max = x_min + w_box, y_min + h_box
|
| 427 |
+
|
| 428 |
+
return x_min, y_min, x_max, y_max
|
| 429 |
+
|
| 430 |
+
@classmethod
|
| 431 |
+
def _parse_response_with_negative(cls, text):
|
| 432 |
+
if not text:
|
| 433 |
+
raise ValueError("LLM response is empty")
|
| 434 |
+
|
| 435 |
+
if cls.objects_text in text:
|
| 436 |
+
text = text.split(cls.objects_text)[1]
|
| 437 |
+
|
| 438 |
+
text_split = text.split(cls.bg_prompt_text_no_trailing_space)
|
| 439 |
+
if len(text_split) == 2:
|
| 440 |
+
gen_boxes, text_rem = text_split
|
| 441 |
+
else:
|
| 442 |
+
raise ValueError(f"LLM response is incomplete: {text}")
|
| 443 |
+
|
| 444 |
+
text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space)
|
| 445 |
+
|
| 446 |
+
if len(text_split) == 2:
|
| 447 |
+
bg_prompt, neg_prompt = text_split
|
| 448 |
+
else:
|
| 449 |
+
raise ValueError(f"LLM response is incomplete: {text}")
|
| 450 |
+
|
| 451 |
+
try:
|
| 452 |
+
gen_boxes = ast.literal_eval(gen_boxes)
|
| 453 |
+
except SyntaxError as e:
|
| 454 |
+
# Sometimes the response is in plain text
|
| 455 |
+
if "No objects" in gen_boxes or gen_boxes.strip() == "":
|
| 456 |
+
gen_boxes = []
|
| 457 |
+
else:
|
| 458 |
+
raise e
|
| 459 |
+
bg_prompt = bg_prompt.strip()
|
| 460 |
+
neg_prompt = neg_prompt.strip()
|
| 461 |
+
|
| 462 |
+
# LLM may return "None" to mean no negative prompt provided.
|
| 463 |
+
if neg_prompt == "None":
|
| 464 |
+
neg_prompt = ""
|
| 465 |
+
|
| 466 |
+
return gen_boxes, bg_prompt, neg_prompt
|
| 467 |
+
|
| 468 |
+
@classmethod
|
| 469 |
+
def parse_llm_response(cls, response, canvas_height=512, canvas_width=512):
|
| 470 |
+
# Infer from spec
|
| 471 |
+
gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response)
|
| 472 |
+
|
| 473 |
+
gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0])
|
| 474 |
+
|
| 475 |
+
phrases = [name for name, _ in gen_boxes]
|
| 476 |
+
boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes]
|
| 477 |
+
|
| 478 |
+
return phrases, boxes, bg_prompt, neg_prompt
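# Illustrative usage sketch (standalone, not part of the pipeline class above): parsing the
# LLM layout response format expected by `parse_llm_response`. Boxes are returned normalized
# to [0, 1] and phrases are sorted alphabetically together with their boxes.
def _parse_llm_response_sketch():
    response = (
        "[('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]\n"
        "Background prompt: A beautiful forest with fall foliage\n"
        "Negative prompt:"
    )
    phrases, boxes, bg_prompt, neg_prompt = LLMGroundedDiffusionPipeline.parse_llm_response(response)
    # phrases   -> ['a modern high speed train', 'a waterfall']
    # boxes     -> [(0.498..., 0.435..., 0.851..., 0.726...), (0.138..., 0.205..., 0.427..., 0.709...)]
    # bg_prompt -> 'A beautiful forest with fall foliage'; neg_prompt -> ''
    return phrases, boxes, bg_prompt, neg_prompt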
|
| 479 |
+
|
| 480 |
+
def check_inputs(
|
| 481 |
+
self,
|
| 482 |
+
prompt,
|
| 483 |
+
height,
|
| 484 |
+
width,
|
| 485 |
+
callback_steps,
|
| 486 |
+
phrases,
|
| 487 |
+
boxes,
|
| 488 |
+
negative_prompt=None,
|
| 489 |
+
prompt_embeds=None,
|
| 490 |
+
negative_prompt_embeds=None,
|
| 491 |
+
phrase_indices=None,
|
| 492 |
+
):
|
| 493 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 494 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 495 |
+
|
| 496 |
+
if (callback_steps is None) or (
|
| 497 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 498 |
+
):
|
| 499 |
+
raise ValueError(
|
| 500 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 501 |
+
f" {type(callback_steps)}."
|
| 502 |
+
)
|
| 503 |
+
|
| 504 |
+
if prompt is not None and prompt_embeds is not None:
|
| 505 |
+
raise ValueError(
|
| 506 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 507 |
+
" only forward one of the two."
|
| 508 |
+
)
|
| 509 |
+
elif prompt is None and prompt_embeds is None:
|
| 510 |
+
raise ValueError(
|
| 511 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 512 |
+
)
|
| 513 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 514 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 515 |
+
elif prompt is None and phrase_indices is None:
|
| 516 |
+
raise ValueError("If the prompt is None, the phrase_indices cannot be None")
|
| 517 |
+
|
| 518 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 519 |
+
raise ValueError(
|
| 520 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 521 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 522 |
+
)
|
| 523 |
+
|
| 524 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 525 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 526 |
+
raise ValueError(
|
| 527 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 528 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 529 |
+
f" {negative_prompt_embeds.shape}."
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
if len(phrases) != len(boxes):
|
| 533 |
+
raise ValueError(
|
| 534 |
+
"length of `phrases` and `boxes` has to be same, but"
|
| 535 |
+
f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}"
|
| 536 |
+
)
|
| 537 |
+
|
| 538 |
+
def register_attn_hooks(self, unet):
|
| 539 |
+
"""Registering hooks to obtain the attention maps for guidance"""
|
| 540 |
+
|
| 541 |
+
attn_procs = {}
|
| 542 |
+
|
| 543 |
+
for name in unet.attn_processors.keys():
|
| 544 |
+
# Only obtain the queries and keys from cross-attention
|
| 545 |
+
if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"):
|
| 546 |
+
# Keep the same attn_processors for self-attention (no hooks for self-attention)
|
| 547 |
+
attn_procs[name] = unet.attn_processors[name]
|
| 548 |
+
continue
|
| 549 |
+
|
| 550 |
+
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
|
| 551 |
+
|
| 552 |
+
if name.startswith("mid_block"):
|
| 553 |
+
hidden_size = unet.config.block_out_channels[-1]
|
| 554 |
+
elif name.startswith("up_blocks"):
|
| 555 |
+
block_id = int(name[len("up_blocks.")])
|
| 556 |
+
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
|
| 557 |
+
elif name.startswith("down_blocks"):
|
| 558 |
+
block_id = int(name[len("down_blocks.")])
|
| 559 |
+
hidden_size = unet.config.block_out_channels[block_id]
|
| 560 |
+
|
| 561 |
+
attn_procs[name] = AttnProcessorWithHook(
|
| 562 |
+
attn_processor_key=name,
|
| 563 |
+
hidden_size=hidden_size,
|
| 564 |
+
cross_attention_dim=cross_attention_dim,
|
| 565 |
+
hook=self.attn_hook,
|
| 566 |
+
fast_attn=True,
|
| 567 |
+
# Not enabled by default
|
| 568 |
+
enabled=False,
|
| 569 |
+
)
|
| 570 |
+
|
| 571 |
+
unet.set_attn_processor(attn_procs)
|
| 572 |
+
|
| 573 |
+
def enable_fuser(self, enabled=True):
|
| 574 |
+
for module in self.unet.modules():
|
| 575 |
+
if isinstance(module, GatedSelfAttentionDense):
|
| 576 |
+
module.enabled = enabled
|
| 577 |
+
|
| 578 |
+
def enable_attn_hook(self, enabled=True):
|
| 579 |
+
for module in self.unet.attn_processors.values():
|
| 580 |
+
if isinstance(module, AttnProcessorWithHook):
|
| 581 |
+
module.enabled = enabled
|
| 582 |
+
|
| 583 |
+
def get_token_map(self, prompt, padding="do_not_pad", verbose=False):
|
| 584 |
+
"""Get a list of mapping: prompt index to str (prompt in a list of token str)"""
|
| 585 |
+
fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np")
|
| 586 |
+
input_ids = fg_prompt_tokens["input_ids"][0]
|
| 587 |
+
|
| 588 |
+
token_map = []
|
| 589 |
+
for ind, item in enumerate(input_ids.tolist()):
|
| 590 |
+
token = self.tokenizer._convert_id_to_token(item)
|
| 591 |
+
|
| 592 |
+
if verbose:
|
| 593 |
+
logger.info(f"{ind}, {token} ({item})")
|
| 594 |
+
|
| 595 |
+
token_map.append(token)
|
| 596 |
+
|
| 597 |
+
return token_map
|
| 598 |
+
|
| 599 |
+
def get_phrase_indices(
|
| 600 |
+
self,
|
| 601 |
+
prompt,
|
| 602 |
+
phrases,
|
| 603 |
+
token_map=None,
|
| 604 |
+
add_suffix_if_not_found=False,
|
| 605 |
+
verbose=False,
|
| 606 |
+
):
|
| 607 |
+
for obj in phrases:
|
| 608 |
+
# Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix
|
| 609 |
+
if obj not in prompt:
|
| 610 |
+
prompt += "| " + obj
|
| 611 |
+
|
| 612 |
+
if token_map is None:
|
| 613 |
+
# We allow using a pre-computed token map.
|
| 614 |
+
token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose)
|
| 615 |
+
token_map_str = " ".join(token_map)
|
| 616 |
+
|
| 617 |
+
phrase_indices = []
|
| 618 |
+
|
| 619 |
+
for obj in phrases:
|
| 620 |
+
phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose)
|
| 621 |
+
# Remove <bos> and <eos> in substr
|
| 622 |
+
phrase_token_map = phrase_token_map[1:-1]
|
| 623 |
+
phrase_token_map_len = len(phrase_token_map)
|
| 624 |
+
phrase_token_map_str = " ".join(phrase_token_map)
|
| 625 |
+
|
| 626 |
+
if verbose:
|
| 627 |
+
logger.info(
|
| 628 |
+
"Full str:",
|
| 629 |
+
token_map_str,
|
| 630 |
+
"Substr:",
|
| 631 |
+
phrase_token_map_str,
|
| 632 |
+
"Phrase:",
|
| 633 |
+
phrases,
|
| 634 |
+
)
|
| 635 |
+
|
| 636 |
+
# Count the number of tokens before the substring
|
| 637 |
+
# The substring comes with a trailing space that needs to be removed by minus one in the index.
|
| 638 |
+
obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" "))
|
| 639 |
+
|
| 640 |
+
obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len))
|
| 641 |
+
phrase_indices.append(obj_position)
|
| 642 |
+
|
| 643 |
+
if add_suffix_if_not_found:
|
| 644 |
+
return phrase_indices, prompt
|
| 645 |
+
|
| 646 |
+
return phrase_indices
|
| 647 |
+
|
| 648 |
+
def add_ca_loss_per_attn_map_to_loss(
|
| 649 |
+
self,
|
| 650 |
+
loss,
|
| 651 |
+
attn_map,
|
| 652 |
+
object_number,
|
| 653 |
+
bboxes,
|
| 654 |
+
phrase_indices,
|
| 655 |
+
fg_top_p=0.2,
|
| 656 |
+
bg_top_p=0.2,
|
| 657 |
+
fg_weight=1.0,
|
| 658 |
+
bg_weight=1.0,
|
| 659 |
+
):
|
| 660 |
+
# b is the number of heads, not batch
|
| 661 |
+
b, i, j = attn_map.shape
|
| 662 |
+
H = W = int(math.sqrt(i))
|
| 663 |
+
for obj_idx in range(object_number):
|
| 664 |
+
obj_loss = 0
|
| 665 |
+
mask = torch.zeros(size=(H, W), device="cuda")
|
| 666 |
+
obj_boxes = bboxes[obj_idx]
|
| 667 |
+
|
| 668 |
+
# We support two level (one box per phrase) and three level (multiple boxes per phrase)
|
| 669 |
+
if not isinstance(obj_boxes[0], Iterable):
|
| 670 |
+
obj_boxes = [obj_boxes]
|
| 671 |
+
|
| 672 |
+
for obj_box in obj_boxes:
|
| 673 |
+
# x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H)
|
| 674 |
+
x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W)
|
| 675 |
+
mask[y_min:y_max, x_min:x_max] = 1
|
| 676 |
+
|
| 677 |
+
for obj_position in phrase_indices[obj_idx]:
|
| 678 |
+
# Could potentially optimize to compute this for loop in batch.
|
| 679 |
+
# Could crop the ref cross attention before saving to save memory.
|
| 680 |
+
|
| 681 |
+
ca_map_obj = attn_map[:, :, obj_position].reshape(b, H, W)
|
| 682 |
+
|
| 683 |
+
# shape: (b, H * W)
|
| 684 |
+
ca_map_obj = attn_map[:, :, obj_position] # .reshape(b, H, W)
|
| 685 |
+
k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1)
|
| 686 |
+
k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1)
|
| 687 |
+
|
| 688 |
+
mask_1d = mask.view(1, -1)
|
| 689 |
+
|
| 690 |
+
# Max-based loss function
|
| 691 |
+
|
| 692 |
+
# Take the topk over spatial dimension, and then take the sum over heads dim
|
| 693 |
+
# The mean is over k_fg and k_bg dimension, so we don't need to sum and divide on our own.
|
| 694 |
+
obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight
|
| 695 |
+
obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight
|
| 696 |
+
|
| 697 |
+
loss += obj_loss / len(phrase_indices[obj_idx])
|
| 698 |
+
|
| 699 |
+
return loss
|
| 700 |
+
|
| 701 |
+
def compute_ca_loss(
|
| 702 |
+
self,
|
| 703 |
+
saved_attn,
|
| 704 |
+
bboxes,
|
| 705 |
+
phrase_indices,
|
| 706 |
+
guidance_attn_keys,
|
| 707 |
+
verbose=False,
|
| 708 |
+
**kwargs,
|
| 709 |
+
):
|
| 710 |
+
"""
|
| 711 |
+
The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to computing the loss.
|
| 712 |
+
`AttnProcessor` will put attention maps into the `save_attn_to_dict`.
|
| 713 |
+
|
| 714 |
+
`index` is the timestep.
|
| 715 |
+
`ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we take the token from word rather than the last token).
|
| 716 |
+
`ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase in single object generation, so we apply it only to the last token of the phrase in overall generation if this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens.
|
| 717 |
+
"""
|
| 718 |
+
loss = torch.tensor(0).float().cuda()
|
| 719 |
+
object_number = len(bboxes)
|
| 720 |
+
if object_number == 0:
|
| 721 |
+
return loss
|
| 722 |
+
|
| 723 |
+
for attn_key in guidance_attn_keys:
|
| 724 |
+
# We only have 1 cross attention for mid.
|
| 725 |
+
|
| 726 |
+
attn_map_integrated = saved_attn[attn_key]
|
| 727 |
+
if not attn_map_integrated.is_cuda:
|
| 728 |
+
attn_map_integrated = attn_map_integrated.cuda()
|
| 729 |
+
# Example dimension: [20, 64, 77]
|
| 730 |
+
attn_map = attn_map_integrated.squeeze(dim=0)
|
| 731 |
+
|
| 732 |
+
loss = self.add_ca_loss_per_attn_map_to_loss(
|
| 733 |
+
loss, attn_map, object_number, bboxes, phrase_indices, **kwargs
|
| 734 |
+
)
|
| 735 |
+
|
| 736 |
+
num_attn = len(guidance_attn_keys)
|
| 737 |
+
|
| 738 |
+
if num_attn > 0:
|
| 739 |
+
loss = loss / (object_number * num_attn)
|
| 740 |
+
|
| 741 |
+
return loss
|
| 742 |
+
|
| 743 |
+
@torch.no_grad()
|
| 744 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 745 |
+
def __call__(
|
| 746 |
+
self,
|
| 747 |
+
prompt: Union[str, List[str]] = None,
|
| 748 |
+
height: Optional[int] = None,
|
| 749 |
+
width: Optional[int] = None,
|
| 750 |
+
num_inference_steps: int = 50,
|
| 751 |
+
guidance_scale: float = 7.5,
|
| 752 |
+
gligen_scheduled_sampling_beta: float = 0.3,
|
| 753 |
+
phrases: List[str] = None,
|
| 754 |
+
boxes: List[List[float]] = None,
|
| 755 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 756 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 757 |
+
eta: float = 0.0,
|
| 758 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 759 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 760 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 761 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 762 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 763 |
+
output_type: Optional[str] = "pil",
|
| 764 |
+
return_dict: bool = True,
|
| 765 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 766 |
+
callback_steps: int = 1,
|
| 767 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 768 |
+
clip_skip: Optional[int] = None,
|
| 769 |
+
lmd_guidance_kwargs: Optional[Dict[str, Any]] = {},
|
| 770 |
+
phrase_indices: Optional[List[int]] = None,
|
| 771 |
+
):
|
| 772 |
+
r"""
|
| 773 |
+
The call function to the pipeline for generation.
|
| 774 |
+
|
| 775 |
+
Args:
|
| 776 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 777 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 778 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 779 |
+
The height in pixels of the generated image.
|
| 780 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 781 |
+
The width in pixels of the generated image.
|
| 782 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 783 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 784 |
+
expense of slower inference.
|
| 785 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 786 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 787 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 788 |
+
phrases (`List[str]`):
|
| 789 |
+
The phrases to guide what to include in each of the regions defined by the corresponding
|
| 790 |
+
`boxes`. There should only be one phrase per bounding box.
|
| 791 |
+
boxes (`List[List[float]]`):
|
| 792 |
+
The bounding boxes that identify rectangular regions of the image that are going to be filled with the
|
| 793 |
+
content described by the corresponding `phrases`. Each rectangular box is defined as a
|
| 794 |
+
`List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1].
|
| 795 |
+
gligen_scheduled_sampling_beta (`float`, defaults to 0.3):
|
| 796 |
+
Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image
|
| 797 |
+
Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for
|
| 798 |
+
scheduled sampling during inference for improved quality and controllability.
|
| 799 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 800 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 801 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 802 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 803 |
+
The number of images to generate per prompt.
|
| 804 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 805 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 806 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 807 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 808 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 809 |
+
generation deterministic.
|
| 810 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 811 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 812 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 813 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 814 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 815 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 816 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 817 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 818 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 819 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 820 |
+
ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
|
| 821 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 822 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 823 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 824 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 825 |
+
plain tuple.
|
| 826 |
+
callback (`Callable`, *optional*):
|
| 827 |
+
A function that is called every `callback_steps` steps during inference. The function is called with the
|
| 828 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 829 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 830 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 831 |
+
every step.
|
| 832 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 833 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 834 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 835 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 836 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 837 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 838 |
+
using zero terminal SNR.
|
| 839 |
+
clip_skip (`int`, *optional*):
|
| 840 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 841 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 842 |
+
lmd_guidance_kwargs (`dict`, *optional*):
|
| 843 |
+
A kwargs dictionary that if specified is passed along to `latent_lmd_guidance` function. Useful keys include `loss_scale` (the guidance strength), `loss_threshold` (when loss is lower than this value, the guidance is not applied anymore), `max_iter` (the number of iterations of guidance for each step), and `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See `latent_lmd_guidance` for implementation details.
|
| 844 |
+
phrase_indices (`list` of `list`, *optional*): The indices of the tokens of each phrase within the overall prompt. If omitted, the pipeline matches each phrase to its first occurrence as a token subsequence in the prompt and, by default, appends any phrase that is not found to the end of the prompt.
|
| 845 |
+
Examples:
|
| 846 |
+
|
| 847 |
+
Returns:
|
| 848 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 849 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 850 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 851 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 852 |
+
"not-safe-for-work" (nsfw) content.
|
| 853 |
+
"""
|
| 854 |
+
# 0. Default height and width to unet
|
| 855 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 856 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 857 |
+
|
| 858 |
+
# 1. Check inputs. Raise error if not correct
|
| 859 |
+
self.check_inputs(
|
| 860 |
+
prompt,
|
| 861 |
+
height,
|
| 862 |
+
width,
|
| 863 |
+
callback_steps,
|
| 864 |
+
phrases,
|
| 865 |
+
boxes,
|
| 866 |
+
negative_prompt,
|
| 867 |
+
prompt_embeds,
|
| 868 |
+
negative_prompt_embeds,
|
| 869 |
+
phrase_indices,
|
| 870 |
+
)
|
| 871 |
+
|
| 872 |
+
# 2. Define call parameters
|
| 873 |
+
if prompt is not None and isinstance(prompt, str):
|
| 874 |
+
batch_size = 1
|
| 875 |
+
if phrase_indices is None:
|
| 876 |
+
phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True)
|
| 877 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 878 |
+
batch_size = len(prompt)
|
| 879 |
+
if phrase_indices is None:
|
| 880 |
+
phrase_indices = []
|
| 881 |
+
prompt_parsed = []
|
| 882 |
+
for prompt_item in prompt:
|
| 883 |
+
(
|
| 884 |
+
phrase_indices_parsed_item,
|
| 885 |
+
prompt_parsed_item,
|
| 886 |
+
) = self.get_phrase_indices(prompt_item, add_suffix_if_not_found=True)
|
| 887 |
+
phrase_indices.append(phrase_indices_parsed_item)
|
| 888 |
+
prompt_parsed.append(prompt_parsed_item)
|
| 889 |
+
prompt = prompt_parsed
|
| 890 |
+
else:
|
| 891 |
+
batch_size = prompt_embeds.shape[0]
|
| 892 |
+
|
| 893 |
+
device = self._execution_device
|
| 894 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 895 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 896 |
+
# corresponds to doing no classifier free guidance.
|
| 897 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 898 |
+
|
| 899 |
+
# 3. Encode input prompt
|
| 900 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 901 |
+
prompt,
|
| 902 |
+
device,
|
| 903 |
+
num_images_per_prompt,
|
| 904 |
+
do_classifier_free_guidance,
|
| 905 |
+
negative_prompt,
|
| 906 |
+
prompt_embeds=prompt_embeds,
|
| 907 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 908 |
+
clip_skip=clip_skip,
|
| 909 |
+
)
|
| 910 |
+
|
| 911 |
+
cond_prompt_embeds = prompt_embeds
|
| 912 |
+
|
| 913 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 914 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 915 |
+
# to avoid doing two forward passes
|
| 916 |
+
if do_classifier_free_guidance:
|
| 917 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 918 |
+
|
| 919 |
+
if ip_adapter_image is not None:
|
| 920 |
+
image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
|
| 921 |
+
if do_classifier_free_guidance:
|
| 922 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 923 |
+
|
| 924 |
+
# 4. Prepare timesteps
|
| 925 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 926 |
+
timesteps = self.scheduler.timesteps
|
| 927 |
+
|
| 928 |
+
# 5. Prepare latent variables
|
| 929 |
+
num_channels_latents = self.unet.config.in_channels
|
| 930 |
+
latents = self.prepare_latents(
|
| 931 |
+
batch_size * num_images_per_prompt,
|
| 932 |
+
num_channels_latents,
|
| 933 |
+
height,
|
| 934 |
+
width,
|
| 935 |
+
prompt_embeds.dtype,
|
| 936 |
+
device,
|
| 937 |
+
generator,
|
| 938 |
+
latents,
|
| 939 |
+
)
|
| 940 |
+
|
| 941 |
+
# 5.1 Prepare GLIGEN variables
|
| 942 |
+
max_objs = 30
|
| 943 |
+
if len(boxes) > max_objs:
|
| 944 |
+
warnings.warn(
|
| 945 |
+
f"More that {max_objs} objects found. Only first {max_objs} objects will be processed.",
|
| 946 |
+
FutureWarning,
|
| 947 |
+
)
|
| 948 |
+
phrases = phrases[:max_objs]
|
| 949 |
+
boxes = boxes[:max_objs]
|
| 950 |
+
|
| 951 |
+
n_objs = len(boxes)
|
| 952 |
+
if n_objs:
|
| 953 |
+
# prepare batched input to the PositionNet (boxes, phrases, mask)
|
| 954 |
+
# Get tokens for phrases from pre-trained CLIPTokenizer
|
| 955 |
+
tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device)
|
| 956 |
+
# For the token, we use the same pre-trained text encoder
|
| 957 |
+
# to obtain its text feature
|
| 958 |
+
_text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output
|
| 959 |
+
|
| 960 |
+
# Each entity described in `phrases` is denoted with a bounding box;
|
| 961 |
+
# we represent the location information as (xmin,ymin,xmax,ymax)
|
| 962 |
+
cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype)
|
| 963 |
+
if n_objs:
|
| 964 |
+
cond_boxes[:n_objs] = torch.tensor(boxes)
|
| 965 |
+
text_embeddings = torch.zeros(
|
| 966 |
+
max_objs,
|
| 967 |
+
self.unet.config.cross_attention_dim,
|
| 968 |
+
device=device,
|
| 969 |
+
dtype=self.text_encoder.dtype,
|
| 970 |
+
)
|
| 971 |
+
if n_objs:
|
| 972 |
+
text_embeddings[:n_objs] = _text_embeddings
|
| 973 |
+
# Generate a mask for each object, i.e. each entity described by `phrases`
|
| 974 |
+
masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype)
|
| 975 |
+
masks[:n_objs] = 1
|
| 976 |
+
|
| 977 |
+
repeat_batch = batch_size * num_images_per_prompt
|
| 978 |
+
cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
|
| 979 |
+
text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
|
| 980 |
+
masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone()
|
| 981 |
+
if do_classifier_free_guidance:
|
| 982 |
+
repeat_batch = repeat_batch * 2
|
| 983 |
+
cond_boxes = torch.cat([cond_boxes] * 2)
|
| 984 |
+
text_embeddings = torch.cat([text_embeddings] * 2)
|
| 985 |
+
masks = torch.cat([masks] * 2)
|
| 986 |
+
masks[: repeat_batch // 2] = 0
|
| 987 |
+
if cross_attention_kwargs is None:
|
| 988 |
+
cross_attention_kwargs = {}
|
| 989 |
+
cross_attention_kwargs["gligen"] = {
|
| 990 |
+
"boxes": cond_boxes,
|
| 991 |
+
"positive_embeddings": text_embeddings,
|
| 992 |
+
"masks": masks,
|
| 993 |
+
}
|
| 994 |
+
|
| 995 |
+
num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps))
|
| 996 |
+
self.enable_fuser(True)
|
| 997 |
+
|
| 998 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 999 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1000 |
+
|
| 1001 |
+
# 6.1 Add image embeds for IP-Adapter
|
| 1002 |
+
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
|
| 1003 |
+
|
| 1004 |
+
loss_attn = torch.tensor(10000.0)
|
| 1005 |
+
|
| 1006 |
+
# 7. Denoising loop
|
| 1007 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1008 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1009 |
+
for i, t in enumerate(timesteps):
|
| 1010 |
+
# Scheduled sampling
|
| 1011 |
+
if i == num_grounding_steps:
|
| 1012 |
+
self.enable_fuser(False)
|
| 1013 |
+
|
| 1014 |
+
if latents.shape[1] != 4:
|
| 1015 |
+
latents = torch.randn_like(latents[:, :4])
|
| 1016 |
+
|
| 1017 |
+
# 7.1 Perform LMD guidance
|
| 1018 |
+
if boxes:
|
| 1019 |
+
latents, loss_attn = self.latent_lmd_guidance(
|
| 1020 |
+
cond_prompt_embeds,
|
| 1021 |
+
index=i,
|
| 1022 |
+
boxes=boxes,
|
| 1023 |
+
phrase_indices=phrase_indices,
|
| 1024 |
+
t=t,
|
| 1025 |
+
latents=latents,
|
| 1026 |
+
loss=loss_attn,
|
| 1027 |
+
**lmd_guidance_kwargs,
|
| 1028 |
+
)
|
| 1029 |
+
|
| 1030 |
+
# expand the latents if we are doing classifier free guidance
|
| 1031 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1032 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1033 |
+
|
| 1034 |
+
# predict the noise residual
|
| 1035 |
+
noise_pred = self.unet(
|
| 1036 |
+
latent_model_input,
|
| 1037 |
+
t,
|
| 1038 |
+
encoder_hidden_states=prompt_embeds,
|
| 1039 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1040 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1041 |
+
).sample
|
| 1042 |
+
|
| 1043 |
+
# perform guidance
|
| 1044 |
+
if do_classifier_free_guidance:
|
| 1045 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1046 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1047 |
+
|
| 1048 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1049 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 1050 |
+
|
| 1051 |
+
# call the callback, if provided
|
| 1052 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1053 |
+
progress_bar.update()
|
| 1054 |
+
if callback is not None and i % callback_steps == 0:
|
| 1055 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1056 |
+
callback(step_idx, t, latents)
|
| 1057 |
+
|
| 1058 |
+
if not output_type == "latent":
|
| 1059 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1060 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1061 |
+
else:
|
| 1062 |
+
image = latents
|
| 1063 |
+
has_nsfw_concept = None
|
| 1064 |
+
|
| 1065 |
+
if has_nsfw_concept is None:
|
| 1066 |
+
do_denormalize = [True] * image.shape[0]
|
| 1067 |
+
else:
|
| 1068 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 1069 |
+
|
| 1070 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 1071 |
+
|
| 1072 |
+
# Offload last model to CPU
|
| 1073 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1074 |
+
self.final_offload_hook.offload()
|
| 1075 |
+
|
| 1076 |
+
if not return_dict:
|
| 1077 |
+
return (image, has_nsfw_concept)
|
| 1078 |
+
|
| 1079 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
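# --- Usage sketch (illustration only, not part of the pipeline source above) ---
# A minimal, hedged example of how this grounded __call__ signature might be driven.
# The base checkpoint and the `custom_pipeline` id are assumptions, not taken from this file.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",           # assumed SD 1.5 checkpoint
    custom_pipeline="llm_grounded_diffusion",   # assumed community pipeline name
    torch_dtype=torch.float16,
).to("cuda")                                    # assumes a CUDA device is available

images = pipe(
    prompt="a waterfall and a modern high speed train",
    phrases=["a waterfall", "a modern high speed train"],        # one phrase per box
    boxes=[[0.05, 0.05, 0.45, 0.95], [0.55, 0.05, 0.95, 0.95]],  # [xmin, ymin, xmax, ymax] in [0, 1]
    gligen_scheduled_sampling_beta=0.4,
    num_inference_steps=50,
    guidance_scale=7.5,
).images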
|
| 1080 |
+
|
| 1081 |
+
@torch.set_grad_enabled(True)
|
| 1082 |
+
def latent_lmd_guidance(
|
| 1083 |
+
self,
|
| 1084 |
+
cond_embeddings,
|
| 1085 |
+
index,
|
| 1086 |
+
boxes,
|
| 1087 |
+
phrase_indices,
|
| 1088 |
+
t,
|
| 1089 |
+
latents,
|
| 1090 |
+
loss,
|
| 1091 |
+
*,
|
| 1092 |
+
loss_scale=20,
|
| 1093 |
+
loss_threshold=5.0,
|
| 1094 |
+
max_iter=[3] * 5 + [2] * 5 + [1] * 5,
|
| 1095 |
+
guidance_timesteps=15,
|
| 1096 |
+
cross_attention_kwargs=None,
|
| 1097 |
+
guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS,
|
| 1098 |
+
verbose=False,
|
| 1099 |
+
clear_cache=False,
|
| 1100 |
+
unet_additional_kwargs={},
|
| 1101 |
+
guidance_callback=None,
|
| 1102 |
+
**kwargs,
|
| 1103 |
+
):
|
| 1104 |
+
scheduler, unet = self.scheduler, self.unet
|
| 1105 |
+
|
| 1106 |
+
iteration = 0
|
| 1107 |
+
|
| 1108 |
+
if index < guidance_timesteps:
|
| 1109 |
+
if isinstance(max_iter, list):
|
| 1110 |
+
max_iter = max_iter[index]
|
| 1111 |
+
|
| 1112 |
+
if verbose:
|
| 1113 |
+
logger.info(
|
| 1114 |
+
f"time index {index}, loss: {loss.item()/loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}"
|
| 1115 |
+
)
|
| 1116 |
+
|
| 1117 |
+
try:
|
| 1118 |
+
self.enable_attn_hook(enabled=True)
|
| 1119 |
+
|
| 1120 |
+
while (
|
| 1121 |
+
loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps
|
| 1122 |
+
):
|
| 1123 |
+
self._saved_attn = {}
|
| 1124 |
+
|
| 1125 |
+
latents.requires_grad_(True)
|
| 1126 |
+
latent_model_input = latents
|
| 1127 |
+
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
|
| 1128 |
+
|
| 1129 |
+
unet(
|
| 1130 |
+
latent_model_input,
|
| 1131 |
+
t,
|
| 1132 |
+
encoder_hidden_states=cond_embeddings,
|
| 1133 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1134 |
+
**unet_additional_kwargs,
|
| 1135 |
+
)
|
| 1136 |
+
|
| 1137 |
+
# update latents with guidance
|
| 1138 |
+
loss = (
|
| 1139 |
+
self.compute_ca_loss(
|
| 1140 |
+
saved_attn=self._saved_attn,
|
| 1141 |
+
bboxes=boxes,
|
| 1142 |
+
phrase_indices=phrase_indices,
|
| 1143 |
+
guidance_attn_keys=guidance_attn_keys,
|
| 1144 |
+
verbose=verbose,
|
| 1145 |
+
**kwargs,
|
| 1146 |
+
)
|
| 1147 |
+
* loss_scale
|
| 1148 |
+
)
|
| 1149 |
+
|
| 1150 |
+
if torch.isnan(loss):
|
| 1151 |
+
raise RuntimeError("**Loss is NaN**")
|
| 1152 |
+
|
| 1153 |
+
# This callback allows visualizations.
|
| 1154 |
+
if guidance_callback is not None:
|
| 1155 |
+
guidance_callback(self, latents, loss, iteration, index)
|
| 1156 |
+
|
| 1157 |
+
self._saved_attn = None
|
| 1158 |
+
|
| 1159 |
+
grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0]
|
| 1160 |
+
|
| 1161 |
+
latents.requires_grad_(False)
|
| 1162 |
+
|
| 1163 |
+
# Scaling with classifier guidance
|
| 1164 |
+
alpha_prod_t = scheduler.alphas_cumprod[t]
|
| 1165 |
+
# Classifier guidance: https://arxiv.org/pdf/2105.05233.pdf
|
| 1166 |
+
# DDIM: https://arxiv.org/pdf/2010.02502.pdf
|
| 1167 |
+
scale = (1 - alpha_prod_t) ** (0.5)
|
| 1168 |
+
latents = latents - scale * grad_cond
|
| 1169 |
+
|
| 1170 |
+
iteration += 1
|
| 1171 |
+
|
| 1172 |
+
if clear_cache:
|
| 1173 |
+
gc.collect()
|
| 1174 |
+
torch.cuda.empty_cache()
|
| 1175 |
+
|
| 1176 |
+
if verbose:
|
| 1177 |
+
logger.info(
|
| 1178 |
+
f"time index {index}, loss: {loss.item()/loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}"
|
| 1179 |
+
)
|
| 1180 |
+
|
| 1181 |
+
finally:
|
| 1182 |
+
self.enable_attn_hook(enabled=False)
|
| 1183 |
+
|
| 1184 |
+
return latents, loss
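# --- Standalone sketch of the latent update used above (illustration only) ---
# The loop above nudges the latents down the gradient of a cross-attention loss,
# scaled by sqrt(1 - alpha_prod_t) as in classifier guidance. The toy loss below is
# an assumption chosen only to keep the snippet self-contained and runnable.
import torch

alphas_cumprod = torch.linspace(0.999, 0.01, 1000)   # stand-in for scheduler.alphas_cumprod
t = 500
latents = torch.randn(1, 4, 64, 64, requires_grad=True)

toy_loss = latents.pow(2).mean() * 20.0               # placeholder for compute_ca_loss * loss_scale
grad_cond = torch.autograd.grad(toy_loss, [latents])[0]

scale = (1 - alphas_cumprod[t]) ** 0.5
latents = (latents - scale * grad_cond).detach()      # one guidance iteration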
|
| 1185 |
+
|
| 1186 |
+
# Below are methods copied from StableDiffusionPipeline
|
| 1187 |
+
# The design choice of not inheriting from StableDiffusionPipeline is discussed here: https://github.com/huggingface/diffusers/pull/5993#issuecomment-1834258517
|
| 1188 |
+
|
| 1189 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
| 1190 |
+
def _encode_prompt(
|
| 1191 |
+
self,
|
| 1192 |
+
prompt,
|
| 1193 |
+
device,
|
| 1194 |
+
num_images_per_prompt,
|
| 1195 |
+
do_classifier_free_guidance,
|
| 1196 |
+
negative_prompt=None,
|
| 1197 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1198 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1199 |
+
lora_scale: Optional[float] = None,
|
| 1200 |
+
**kwargs,
|
| 1201 |
+
):
|
| 1202 |
+
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
|
| 1203 |
+
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
|
| 1204 |
+
|
| 1205 |
+
prompt_embeds_tuple = self.encode_prompt(
|
| 1206 |
+
prompt=prompt,
|
| 1207 |
+
device=device,
|
| 1208 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1209 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1210 |
+
negative_prompt=negative_prompt,
|
| 1211 |
+
prompt_embeds=prompt_embeds,
|
| 1212 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1213 |
+
lora_scale=lora_scale,
|
| 1214 |
+
**kwargs,
|
| 1215 |
+
)
|
| 1216 |
+
|
| 1217 |
+
# concatenate for backwards comp
|
| 1218 |
+
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
|
| 1219 |
+
|
| 1220 |
+
return prompt_embeds
|
| 1221 |
+
|
| 1222 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
|
| 1223 |
+
def encode_prompt(
|
| 1224 |
+
self,
|
| 1225 |
+
prompt,
|
| 1226 |
+
device,
|
| 1227 |
+
num_images_per_prompt,
|
| 1228 |
+
do_classifier_free_guidance,
|
| 1229 |
+
negative_prompt=None,
|
| 1230 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1231 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1232 |
+
lora_scale: Optional[float] = None,
|
| 1233 |
+
clip_skip: Optional[int] = None,
|
| 1234 |
+
):
|
| 1235 |
+
r"""
|
| 1236 |
+
Encodes the prompt into text encoder hidden states.
|
| 1237 |
+
|
| 1238 |
+
Args:
|
| 1239 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 1240 |
+
prompt to be encoded
|
| 1241 |
+
device: (`torch.device`):
|
| 1242 |
+
torch device
|
| 1243 |
+
num_images_per_prompt (`int`):
|
| 1244 |
+
number of images that should be generated per prompt
|
| 1245 |
+
do_classifier_free_guidance (`bool`):
|
| 1246 |
+
whether to use classifier free guidance or not
|
| 1247 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1248 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 1249 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 1250 |
+
less than `1`).
|
| 1251 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1252 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1253 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1254 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1255 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1256 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1257 |
+
argument.
|
| 1258 |
+
lora_scale (`float`, *optional*):
|
| 1259 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 1260 |
+
clip_skip (`int`, *optional*):
|
| 1261 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 1262 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 1263 |
+
"""
|
| 1264 |
+
# set lora scale so that monkey patched LoRA
|
| 1265 |
+
# function of text encoder can correctly access it
|
| 1266 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 1267 |
+
self._lora_scale = lora_scale
|
| 1268 |
+
|
| 1269 |
+
# dynamically adjust the LoRA scale
|
| 1270 |
+
if not USE_PEFT_BACKEND:
|
| 1271 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 1272 |
+
else:
|
| 1273 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 1274 |
+
|
| 1275 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1276 |
+
batch_size = 1
|
| 1277 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1278 |
+
batch_size = len(prompt)
|
| 1279 |
+
else:
|
| 1280 |
+
batch_size = prompt_embeds.shape[0]
|
| 1281 |
+
|
| 1282 |
+
if prompt_embeds is None:
|
| 1283 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 1284 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 1285 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 1286 |
+
|
| 1287 |
+
text_inputs = self.tokenizer(
|
| 1288 |
+
prompt,
|
| 1289 |
+
padding="max_length",
|
| 1290 |
+
max_length=self.tokenizer.model_max_length,
|
| 1291 |
+
truncation=True,
|
| 1292 |
+
return_tensors="pt",
|
| 1293 |
+
)
|
| 1294 |
+
text_input_ids = text_inputs.input_ids
|
| 1295 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 1296 |
+
|
| 1297 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 1298 |
+
text_input_ids, untruncated_ids
|
| 1299 |
+
):
|
| 1300 |
+
removed_text = self.tokenizer.batch_decode(
|
| 1301 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 1302 |
+
)
|
| 1303 |
+
logger.warning(
|
| 1304 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 1305 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 1306 |
+
)
|
| 1307 |
+
|
| 1308 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 1309 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 1310 |
+
else:
|
| 1311 |
+
attention_mask = None
|
| 1312 |
+
|
| 1313 |
+
if clip_skip is None:
|
| 1314 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 1315 |
+
prompt_embeds = prompt_embeds[0]
|
| 1316 |
+
else:
|
| 1317 |
+
prompt_embeds = self.text_encoder(
|
| 1318 |
+
text_input_ids.to(device),
|
| 1319 |
+
attention_mask=attention_mask,
|
| 1320 |
+
output_hidden_states=True,
|
| 1321 |
+
)
|
| 1322 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 1323 |
+
# all the hidden states from the encoder layers. Then index into
|
| 1324 |
+
# the tuple to access the hidden states from the desired layer.
|
| 1325 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 1326 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 1327 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 1328 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 1329 |
+
# layer.
|
| 1330 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 1331 |
+
|
| 1332 |
+
if self.text_encoder is not None:
|
| 1333 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 1334 |
+
elif self.unet is not None:
|
| 1335 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 1336 |
+
else:
|
| 1337 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 1338 |
+
|
| 1339 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 1340 |
+
|
| 1341 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 1342 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 1343 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 1344 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 1345 |
+
|
| 1346 |
+
# get unconditional embeddings for classifier free guidance
|
| 1347 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 1348 |
+
uncond_tokens: List[str]
|
| 1349 |
+
if negative_prompt is None:
|
| 1350 |
+
uncond_tokens = [""] * batch_size
|
| 1351 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 1352 |
+
raise TypeError(
|
| 1353 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 1354 |
+
f" {type(prompt)}."
|
| 1355 |
+
)
|
| 1356 |
+
elif isinstance(negative_prompt, str):
|
| 1357 |
+
uncond_tokens = [negative_prompt]
|
| 1358 |
+
elif batch_size != len(negative_prompt):
|
| 1359 |
+
raise ValueError(
|
| 1360 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 1361 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 1362 |
+
" the batch size of `prompt`."
|
| 1363 |
+
)
|
| 1364 |
+
else:
|
| 1365 |
+
uncond_tokens = negative_prompt
|
| 1366 |
+
|
| 1367 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 1368 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 1369 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 1370 |
+
|
| 1371 |
+
max_length = prompt_embeds.shape[1]
|
| 1372 |
+
uncond_input = self.tokenizer(
|
| 1373 |
+
uncond_tokens,
|
| 1374 |
+
padding="max_length",
|
| 1375 |
+
max_length=max_length,
|
| 1376 |
+
truncation=True,
|
| 1377 |
+
return_tensors="pt",
|
| 1378 |
+
)
|
| 1379 |
+
|
| 1380 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 1381 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 1382 |
+
else:
|
| 1383 |
+
attention_mask = None
|
| 1384 |
+
|
| 1385 |
+
negative_prompt_embeds = self.text_encoder(
|
| 1386 |
+
uncond_input.input_ids.to(device),
|
| 1387 |
+
attention_mask=attention_mask,
|
| 1388 |
+
)
|
| 1389 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 1390 |
+
|
| 1391 |
+
if do_classifier_free_guidance:
|
| 1392 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 1393 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 1394 |
+
|
| 1395 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 1396 |
+
|
| 1397 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 1398 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 1399 |
+
|
| 1400 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 1401 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 1402 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 1403 |
+
|
| 1404 |
+
return prompt_embeds, negative_prompt_embeds
|
| 1405 |
+
|
| 1406 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 1407 |
+
def encode_image(self, image, device, num_images_per_prompt):
|
| 1408 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 1409 |
+
|
| 1410 |
+
if not isinstance(image, torch.Tensor):
|
| 1411 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 1412 |
+
|
| 1413 |
+
image = image.to(device=device, dtype=dtype)
|
| 1414 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 1415 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 1416 |
+
|
| 1417 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 1418 |
+
return image_embeds, uncond_image_embeds
|
| 1419 |
+
|
| 1420 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
| 1421 |
+
def run_safety_checker(self, image, device, dtype):
|
| 1422 |
+
if self.safety_checker is None:
|
| 1423 |
+
has_nsfw_concept = None
|
| 1424 |
+
else:
|
| 1425 |
+
if torch.is_tensor(image):
|
| 1426 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 1427 |
+
else:
|
| 1428 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 1429 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 1430 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 1431 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 1432 |
+
)
|
| 1433 |
+
return image, has_nsfw_concept
|
| 1434 |
+
|
| 1435 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
| 1436 |
+
def decode_latents(self, latents):
|
| 1437 |
+
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
|
| 1438 |
+
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
|
| 1439 |
+
|
| 1440 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 1441 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 1442 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 1443 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 1444 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 1445 |
+
return image
|
| 1446 |
+
|
| 1447 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 1448 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 1449 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 1450 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 1451 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 1452 |
+
# and should be between [0, 1]
|
| 1453 |
+
|
| 1454 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 1455 |
+
extra_step_kwargs = {}
|
| 1456 |
+
if accepts_eta:
|
| 1457 |
+
extra_step_kwargs["eta"] = eta
|
| 1458 |
+
|
| 1459 |
+
# check if the scheduler accepts generator
|
| 1460 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 1461 |
+
if accepts_generator:
|
| 1462 |
+
extra_step_kwargs["generator"] = generator
|
| 1463 |
+
return extra_step_kwargs
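# --- Minimal illustration of the signature introspection above (illustration only) ---
# Shows which schedulers would receive `eta` / `generator`; the two scheduler classes
# are just convenient examples, not a requirement of this pipeline.
import inspect
from diffusers import DDIMScheduler, EulerDiscreteScheduler

for scheduler_cls in (DDIMScheduler, EulerDiscreteScheduler):
    params = set(inspect.signature(scheduler_cls.step).parameters.keys())
    print(scheduler_cls.__name__, "accepts eta:", "eta" in params, "accepts generator:", "generator" in params)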
|
| 1464 |
+
|
| 1465 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 1466 |
+
def prepare_latents(
|
| 1467 |
+
self,
|
| 1468 |
+
batch_size,
|
| 1469 |
+
num_channels_latents,
|
| 1470 |
+
height,
|
| 1471 |
+
width,
|
| 1472 |
+
dtype,
|
| 1473 |
+
device,
|
| 1474 |
+
generator,
|
| 1475 |
+
latents=None,
|
| 1476 |
+
):
|
| 1477 |
+
shape = (
|
| 1478 |
+
batch_size,
|
| 1479 |
+
num_channels_latents,
|
| 1480 |
+
height // self.vae_scale_factor,
|
| 1481 |
+
width // self.vae_scale_factor,
|
| 1482 |
+
)
|
| 1483 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 1484 |
+
raise ValueError(
|
| 1485 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 1486 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 1487 |
+
)
|
| 1488 |
+
|
| 1489 |
+
if latents is None:
|
| 1490 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 1491 |
+
else:
|
| 1492 |
+
latents = latents.to(device)
|
| 1493 |
+
|
| 1494 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 1495 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 1496 |
+
return latents
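# --- Shape sanity check for prepare_latents above (illustration only) ---
# For a 512x512 image with vae_scale_factor=8 and 4 latent channels, the initial
# noise tensor is (batch, 4, 64, 64), then scaled by the scheduler's init sigma
# (assumed to be 1.0 here, as for DDIM-style schedulers).
import torch

batch, channels, height, width, vae_scale_factor = 1, 4, 512, 512, 8
latents = torch.randn(batch, channels, height // vae_scale_factor, width // vae_scale_factor)
init_noise_sigma = 1.0   # assumed value
latents = latents * init_noise_sigma
assert latents.shape == (1, 4, 64, 64)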
|
| 1497 |
+
|
| 1498 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 1499 |
+
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 1500 |
+
"""
|
| 1501 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 1502 |
+
|
| 1503 |
+
Args:
|
| 1504 |
+
w (`torch.Tensor`):
|
| 1505 |
+
guidance scale values at which to generate embedding vectors
|
| 1506 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 1507 |
+
dimension of the embeddings to generate
|
| 1508 |
+
dtype:
|
| 1509 |
+
data type of the generated embeddings
|
| 1510 |
+
|
| 1511 |
+
Returns:
|
| 1512 |
+
`torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
|
| 1513 |
+
"""
|
| 1514 |
+
assert len(w.shape) == 1
|
| 1515 |
+
w = w * 1000.0
|
| 1516 |
+
|
| 1517 |
+
half_dim = embedding_dim // 2
|
| 1518 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 1519 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 1520 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 1521 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 1522 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 1523 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 1524 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 1525 |
+
return emb
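# --- Self-contained check of the sinusoidal embedding above (illustration only) ---
# Reproduces the same math outside the class so the shapes are easy to inspect;
# the guidance scale 7.5 and embedding_dim 512 are arbitrary example values.
import torch

w = torch.tensor([7.5]) * 1000.0
embedding_dim = 512
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = w[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
assert emb.shape == (1, embedding_dim)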
|
| 1526 |
+
|
| 1527 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
|
| 1528 |
+
@property
|
| 1529 |
+
def guidance_scale(self):
|
| 1530 |
+
return self._guidance_scale
|
| 1531 |
+
|
| 1532 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
|
| 1533 |
+
@property
|
| 1534 |
+
def guidance_rescale(self):
|
| 1535 |
+
return self._guidance_rescale
|
| 1536 |
+
|
| 1537 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
|
| 1538 |
+
@property
|
| 1539 |
+
def clip_skip(self):
|
| 1540 |
+
return self._clip_skip
|
| 1541 |
+
|
| 1542 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 1543 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 1544 |
+
# corresponds to doing no classifier free guidance.
|
| 1545 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
|
| 1546 |
+
@property
|
| 1547 |
+
def do_classifier_free_guidance(self):
|
| 1548 |
+
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
|
| 1549 |
+
|
| 1550 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
|
| 1551 |
+
@property
|
| 1552 |
+
def cross_attention_kwargs(self):
|
| 1553 |
+
return self._cross_attention_kwargs
|
| 1554 |
+
|
| 1555 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
|
| 1556 |
+
@property
|
| 1557 |
+
def num_timesteps(self):
|
| 1558 |
+
return self._num_timesteps
|
v0.27.0/lpw_stable_diffusion.py
ADDED
|
@@ -0,0 +1,1364 @@
| 1 |
+
import inspect
|
| 2 |
+
import re
|
| 3 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import PIL.Image
|
| 7 |
+
import torch
|
| 8 |
+
from packaging import version
|
| 9 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 10 |
+
|
| 11 |
+
from diffusers import DiffusionPipeline
|
| 12 |
+
from diffusers.configuration_utils import FrozenDict
|
| 13 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 14 |
+
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 15 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 16 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 17 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
|
| 18 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 19 |
+
from diffusers.utils import (
|
| 20 |
+
PIL_INTERPOLATION,
|
| 21 |
+
deprecate,
|
| 22 |
+
logging,
|
| 23 |
+
)
|
| 24 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# ------------------------------------------------------------------------------
|
| 28 |
+
|
| 29 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 30 |
+
|
| 31 |
+
re_attention = re.compile(
|
| 32 |
+
r"""
|
| 33 |
+
\\\(|
|
| 34 |
+
\\\)|
|
| 35 |
+
\\\[|
|
| 36 |
+
\\]|
|
| 37 |
+
\\\\|
|
| 38 |
+
\\|
|
| 39 |
+
\(|
|
| 40 |
+
\[|
|
| 41 |
+
:([+-]?[.\d]+)\)|
|
| 42 |
+
\)|
|
| 43 |
+
]|
|
| 44 |
+
[^\\()\[\]:]+|
|
| 45 |
+
:
|
| 46 |
+
""",
|
| 47 |
+
re.X,
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def parse_prompt_attention(text):
|
| 52 |
+
"""
|
| 53 |
+
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
|
| 54 |
+
Accepted tokens are:
|
| 55 |
+
(abc) - increases attention to abc by a multiplier of 1.1
|
| 56 |
+
(abc:3.12) - increases attention to abc by a multiplier of 3.12
|
| 57 |
+
[abc] - decreases attention to abc by a multiplier of 1.1
|
| 58 |
+
\\( - literal character '('
|
| 59 |
+
\\[ - literal character '['
|
| 60 |
+
\\) - literal character ')'
|
| 61 |
+
\\] - literal character ']'
|
| 62 |
+
\\ - literal character '\'
|
| 63 |
+
anything else - just text
|
| 64 |
+
>>> parse_prompt_attention('normal text')
|
| 65 |
+
[['normal text', 1.0]]
|
| 66 |
+
>>> parse_prompt_attention('an (important) word')
|
| 67 |
+
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
|
| 68 |
+
>>> parse_prompt_attention('(unbalanced')
|
| 69 |
+
[['unbalanced', 1.1]]
|
| 70 |
+
>>> parse_prompt_attention('\\(literal\\]')
|
| 71 |
+
[['(literal]', 1.0]]
|
| 72 |
+
>>> parse_prompt_attention('(unnecessary)(parens)')
|
| 73 |
+
[['unnecessaryparens', 1.1]]
|
| 74 |
+
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
|
| 75 |
+
[['a ', 1.0],
|
| 76 |
+
['house', 1.5730000000000004],
|
| 77 |
+
[' ', 1.1],
|
| 78 |
+
['on', 1.0],
|
| 79 |
+
[' a ', 1.1],
|
| 80 |
+
['hill', 0.55],
|
| 81 |
+
[', sun, ', 1.1],
|
| 82 |
+
['sky', 1.4641000000000006],
|
| 83 |
+
['.', 1.1]]
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
res = []
|
| 87 |
+
round_brackets = []
|
| 88 |
+
square_brackets = []
|
| 89 |
+
|
| 90 |
+
round_bracket_multiplier = 1.1
|
| 91 |
+
square_bracket_multiplier = 1 / 1.1
|
| 92 |
+
|
| 93 |
+
def multiply_range(start_position, multiplier):
|
| 94 |
+
for p in range(start_position, len(res)):
|
| 95 |
+
res[p][1] *= multiplier
|
| 96 |
+
|
| 97 |
+
for m in re_attention.finditer(text):
|
| 98 |
+
text = m.group(0)
|
| 99 |
+
weight = m.group(1)
|
| 100 |
+
|
| 101 |
+
if text.startswith("\\"):
|
| 102 |
+
res.append([text[1:], 1.0])
|
| 103 |
+
elif text == "(":
|
| 104 |
+
round_brackets.append(len(res))
|
| 105 |
+
elif text == "[":
|
| 106 |
+
square_brackets.append(len(res))
|
| 107 |
+
elif weight is not None and len(round_brackets) > 0:
|
| 108 |
+
multiply_range(round_brackets.pop(), float(weight))
|
| 109 |
+
elif text == ")" and len(round_brackets) > 0:
|
| 110 |
+
multiply_range(round_brackets.pop(), round_bracket_multiplier)
|
| 111 |
+
elif text == "]" and len(square_brackets) > 0:
|
| 112 |
+
multiply_range(square_brackets.pop(), square_bracket_multiplier)
|
| 113 |
+
else:
|
| 114 |
+
res.append([text, 1.0])
|
| 115 |
+
|
| 116 |
+
for pos in round_brackets:
|
| 117 |
+
multiply_range(pos, round_bracket_multiplier)
|
| 118 |
+
|
| 119 |
+
for pos in square_brackets:
|
| 120 |
+
multiply_range(pos, square_bracket_multiplier)
|
| 121 |
+
|
| 122 |
+
if len(res) == 0:
|
| 123 |
+
res = [["", 1.0]]
|
| 124 |
+
|
| 125 |
+
# merge runs of identical weights
|
| 126 |
+
i = 0
|
| 127 |
+
while i + 1 < len(res):
|
| 128 |
+
if res[i][1] == res[i + 1][1]:
|
| 129 |
+
res[i][0] += res[i + 1][0]
|
| 130 |
+
res.pop(i + 1)
|
| 131 |
+
else:
|
| 132 |
+
i += 1
|
| 133 |
+
|
| 134 |
+
return res
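# --- Arithmetic behind the last doctest above (illustration only) ---
# Nested round brackets multiply a 1.1 boost each (including the unbalanced one that is
# applied at the end), while an explicit ":weight" replaces the innermost 1.1 boost.
house = 1.3 * 1.1 * 1.1          # -> 1.573..., matches ['house', 1.5730000000000004]
hill = 0.5 * 1.1                 # -> 0.55,     matches ['hill', 0.55]
sky = 1.1 * 1.1 * 1.1 * 1.1      # -> 1.4641..., matches ['sky', 1.4641000000000006]
print(house, hill, sky)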
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
|
| 138 |
+
r"""
|
| 139 |
+
Tokenize a list of prompts and return their tokens together with the weight of each token.
|
| 140 |
+
|
| 141 |
+
No padding, starting or ending token is included.
|
| 142 |
+
"""
|
| 143 |
+
tokens = []
|
| 144 |
+
weights = []
|
| 145 |
+
truncated = False
|
| 146 |
+
for text in prompt:
|
| 147 |
+
texts_and_weights = parse_prompt_attention(text)
|
| 148 |
+
text_token = []
|
| 149 |
+
text_weight = []
|
| 150 |
+
for word, weight in texts_and_weights:
|
| 151 |
+
# tokenize and discard the starting and the ending token
|
| 152 |
+
token = pipe.tokenizer(word).input_ids[1:-1]
|
| 153 |
+
text_token += token
|
| 154 |
+
# copy the weight by length of token
|
| 155 |
+
text_weight += [weight] * len(token)
|
| 156 |
+
# stop if the text is too long (longer than truncation limit)
|
| 157 |
+
if len(text_token) > max_length:
|
| 158 |
+
truncated = True
|
| 159 |
+
break
|
| 160 |
+
# truncate
|
| 161 |
+
if len(text_token) > max_length:
|
| 162 |
+
truncated = True
|
| 163 |
+
text_token = text_token[:max_length]
|
| 164 |
+
text_weight = text_weight[:max_length]
|
| 165 |
+
tokens.append(text_token)
|
| 166 |
+
weights.append(text_weight)
|
| 167 |
+
if truncated:
|
| 168 |
+
logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
|
| 169 |
+
return tokens, weights
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
|
| 173 |
+
r"""
|
| 174 |
+
Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
|
| 175 |
+
"""
|
| 176 |
+
max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
|
| 177 |
+
weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
|
| 178 |
+
for i in range(len(tokens)):
|
| 179 |
+
tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
|
| 180 |
+
if no_boseos_middle:
|
| 181 |
+
weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
|
| 182 |
+
else:
|
| 183 |
+
w = []
|
| 184 |
+
if len(weights[i]) == 0:
|
| 185 |
+
w = [1.0] * weights_length
|
| 186 |
+
else:
|
| 187 |
+
for j in range(max_embeddings_multiples):
|
| 188 |
+
w.append(1.0) # weight for starting token in this chunk
|
| 189 |
+
w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
|
| 190 |
+
w.append(1.0) # weight for ending token in this chunk
|
| 191 |
+
w += [1.0] * (weights_length - len(w))
|
| 192 |
+
weights[i] = w[:]
|
| 193 |
+
|
| 194 |
+
return tokens, weights
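# --- Tiny illustration of the padding layout above (illustration only) ---
# With chunk_length=77 and max_length=77, a 3-token prompt becomes
# [bos, t1, t2, t3, pad, ..., pad, eos] and weights [1.0, w1, w2, w3, 1.0, ..., 1.0].
# The special token ids and the example token ids/weights below are assumptions.
bos, eos, pad = 49406, 49407, 49407     # assumed CLIP special token ids
tokens = [[320, 1125, 539]]             # assumed token ids for a short prompt
weights = [[1.0, 1.2, 1.2]]
max_length = 77
padded = [bos] + tokens[0] + [pad] * (max_length - 2 - len(tokens[0])) + [eos]
padded_w = [1.0] + weights[0] + [1.0] * (max_length - 1 - len(weights[0]))
assert len(padded) == max_length and len(padded_w) == max_length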
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def get_unweighted_text_embeddings(
|
| 198 |
+
pipe: DiffusionPipeline,
|
| 199 |
+
text_input: torch.Tensor,
|
| 200 |
+
chunk_length: int,
|
| 201 |
+
no_boseos_middle: Optional[bool] = True,
|
| 202 |
+
):
|
| 203 |
+
"""
|
| 204 |
+
When the tokenized text is longer than the capacity of the text encoder, it should be split into chunks
|
| 205 |
+
and each chunk sent to the text encoder individually.
|
| 206 |
+
"""
|
| 207 |
+
max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
|
| 208 |
+
if max_embeddings_multiples > 1:
|
| 209 |
+
text_embeddings = []
|
| 210 |
+
for i in range(max_embeddings_multiples):
|
| 211 |
+
# extract the i-th chunk
|
| 212 |
+
text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
|
| 213 |
+
|
| 214 |
+
# cover the head and the tail by the starting and the ending tokens
|
| 215 |
+
text_input_chunk[:, 0] = text_input[0, 0]
|
| 216 |
+
text_input_chunk[:, -1] = text_input[0, -1]
|
| 217 |
+
text_embedding = pipe.text_encoder(text_input_chunk)[0]
|
| 218 |
+
|
| 219 |
+
if no_boseos_middle:
|
| 220 |
+
if i == 0:
|
| 221 |
+
# discard the ending token
|
| 222 |
+
text_embedding = text_embedding[:, :-1]
|
| 223 |
+
elif i == max_embeddings_multiples - 1:
|
| 224 |
+
# discard the starting token
|
| 225 |
+
text_embedding = text_embedding[:, 1:]
|
| 226 |
+
else:
|
| 227 |
+
# discard both starting and ending tokens
|
| 228 |
+
text_embedding = text_embedding[:, 1:-1]
|
| 229 |
+
|
| 230 |
+
text_embeddings.append(text_embedding)
|
| 231 |
+
text_embeddings = torch.concat(text_embeddings, axis=1)
|
| 232 |
+
else:
|
| 233 |
+
text_embeddings = pipe.text_encoder(text_input)[0]
|
| 234 |
+
return text_embeddings
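# --- Chunk-boundary illustration for the splitting above (illustration only) ---
# With chunk_length=77, a padded input of length 2 + 3*75 = 227 is split into 3 chunks,
# each re-wrapped with the original BOS/EOS before being encoded.
chunk_length = 77
text_len = 2 + 3 * (chunk_length - 2)                 # 227 tokens incl. global bos/eos
max_embeddings_multiples = (text_len - 2) // (chunk_length - 2)
chunk_bounds = [(i * (chunk_length - 2), (i + 1) * (chunk_length - 2) + 2) for i in range(max_embeddings_multiples)]
print(max_embeddings_multiples, chunk_bounds)         # 3, [(0, 77), (75, 152), (150, 227)]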
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def get_weighted_text_embeddings(
|
| 238 |
+
pipe: DiffusionPipeline,
|
| 239 |
+
prompt: Union[str, List[str]],
|
| 240 |
+
uncond_prompt: Optional[Union[str, List[str]]] = None,
|
| 241 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 242 |
+
no_boseos_middle: Optional[bool] = False,
|
| 243 |
+
skip_parsing: Optional[bool] = False,
|
| 244 |
+
skip_weighting: Optional[bool] = False,
|
| 245 |
+
):
|
| 246 |
+
r"""
|
| 247 |
+
Prompts can be assigned local weights using brackets. For example, the
|
| 248 |
+
prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
|
| 249 |
+
and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
|
| 250 |
+
|
| 251 |
+
Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
|
| 252 |
+
|
| 253 |
+
Args:
|
| 254 |
+
pipe (`DiffusionPipeline`):
|
| 255 |
+
Pipe to provide access to the tokenizer and the text encoder.
|
| 256 |
+
prompt (`str` or `List[str]`):
|
| 257 |
+
The prompt or prompts to guide the image generation.
|
| 258 |
+
uncond_prompt (`str` or `List[str]`):
|
| 259 |
+
The unconditional prompt or prompts for guide the image generation. If unconditional prompt
|
| 260 |
+
is provided, the embeddings of prompt and uncond_prompt are concatenated.
|
| 261 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 262 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 263 |
+
no_boseos_middle (`bool`, *optional*, defaults to `False`):
|
| 264 |
+
If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and
|
| 265 |
+
ending tokens in each of the middle chunks.
|
| 266 |
+
skip_parsing (`bool`, *optional*, defaults to `False`):
|
| 267 |
+
Skip the parsing of brackets.
|
| 268 |
+
skip_weighting (`bool`, *optional*, defaults to `False`):
|
| 269 |
+
Skip the weighting. When parsing is skipped, weighting is skipped as well.
|
| 270 |
+
"""
|
| 271 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 272 |
+
if isinstance(prompt, str):
|
| 273 |
+
prompt = [prompt]
|
| 274 |
+
|
| 275 |
+
if not skip_parsing:
|
| 276 |
+
prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
|
| 277 |
+
if uncond_prompt is not None:
|
| 278 |
+
if isinstance(uncond_prompt, str):
|
| 279 |
+
uncond_prompt = [uncond_prompt]
|
| 280 |
+
uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
|
| 281 |
+
else:
|
| 282 |
+
prompt_tokens = [
|
| 283 |
+
token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
|
| 284 |
+
]
|
| 285 |
+
prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
|
| 286 |
+
if uncond_prompt is not None:
|
| 287 |
+
if isinstance(uncond_prompt, str):
|
| 288 |
+
uncond_prompt = [uncond_prompt]
|
| 289 |
+
uncond_tokens = [
|
| 290 |
+
token[1:-1]
|
| 291 |
+
for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
|
| 292 |
+
]
|
| 293 |
+
uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
|
| 294 |
+
|
| 295 |
+
# round up the longest length of tokens to a multiple of (model_max_length - 2)
|
| 296 |
+
max_length = max([len(token) for token in prompt_tokens])
|
| 297 |
+
if uncond_prompt is not None:
|
| 298 |
+
max_length = max(max_length, max([len(token) for token in uncond_tokens]))
|
| 299 |
+
|
| 300 |
+
max_embeddings_multiples = min(
|
| 301 |
+
max_embeddings_multiples,
|
| 302 |
+
(max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
|
| 303 |
+
)
|
| 304 |
+
max_embeddings_multiples = max(1, max_embeddings_multiples)
|
| 305 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 306 |
+
|
| 307 |
+
# pad the length of tokens and weights
|
| 308 |
+
bos = pipe.tokenizer.bos_token_id
|
| 309 |
+
eos = pipe.tokenizer.eos_token_id
|
| 310 |
+
pad = getattr(pipe.tokenizer, "pad_token_id", eos)
|
| 311 |
+
prompt_tokens, prompt_weights = pad_tokens_and_weights(
|
| 312 |
+
prompt_tokens,
|
| 313 |
+
prompt_weights,
|
| 314 |
+
max_length,
|
| 315 |
+
bos,
|
| 316 |
+
eos,
|
| 317 |
+
pad,
|
| 318 |
+
no_boseos_middle=no_boseos_middle,
|
| 319 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 320 |
+
)
|
| 321 |
+
prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
|
| 322 |
+
if uncond_prompt is not None:
|
| 323 |
+
uncond_tokens, uncond_weights = pad_tokens_and_weights(
|
| 324 |
+
uncond_tokens,
|
| 325 |
+
uncond_weights,
|
| 326 |
+
max_length,
|
| 327 |
+
bos,
|
| 328 |
+
eos,
|
| 329 |
+
pad,
|
| 330 |
+
no_boseos_middle=no_boseos_middle,
|
| 331 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 332 |
+
)
|
| 333 |
+
uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
|
| 334 |
+
|
| 335 |
+
# get the embeddings
|
| 336 |
+
text_embeddings = get_unweighted_text_embeddings(
|
| 337 |
+
pipe,
|
| 338 |
+
prompt_tokens,
|
| 339 |
+
pipe.tokenizer.model_max_length,
|
| 340 |
+
no_boseos_middle=no_boseos_middle,
|
| 341 |
+
)
|
| 342 |
+
prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
|
| 343 |
+
if uncond_prompt is not None:
|
| 344 |
+
uncond_embeddings = get_unweighted_text_embeddings(
|
| 345 |
+
pipe,
|
| 346 |
+
uncond_tokens,
|
| 347 |
+
pipe.tokenizer.model_max_length,
|
| 348 |
+
no_boseos_middle=no_boseos_middle,
|
| 349 |
+
)
|
| 350 |
+
uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
|
| 351 |
+
|
| 352 |
+
# assign weights to the prompts and normalize in the sense of mean
|
| 353 |
+
# TODO: should we normalize by chunk or in a whole (current implementation)?
|
| 354 |
+
if (not skip_parsing) and (not skip_weighting):
|
| 355 |
+
previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
|
| 356 |
+
text_embeddings *= prompt_weights.unsqueeze(-1)
|
| 357 |
+
current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
|
| 358 |
+
text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
|
| 359 |
+
if uncond_prompt is not None:
|
| 360 |
+
previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
|
| 361 |
+
uncond_embeddings *= uncond_weights.unsqueeze(-1)
|
| 362 |
+
current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
|
| 363 |
+
uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
|
| 364 |
+
|
| 365 |
+
if uncond_prompt is not None:
|
| 366 |
+
return text_embeddings, uncond_embeddings
|
| 367 |
+
return text_embeddings, None
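# Illustrative usage sketch (not part of the pipeline). Assumptions: `pipe` can be any Stable
# Diffusion pipeline exposing `tokenizer`, `text_encoder` and `device`; the checkpoint id is an
# example only. Parenthesised words such as "(very beautiful)" are up-weighted by 1.1 as
# described in the docstring above.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
with torch.no_grad():
    cond_embeds, uncond_embeds = get_weighted_text_embeddings(
        pipe,
        prompt="a (very beautiful) masterpiece",
        uncond_prompt="lowres, blurry",
        max_embeddings_multiples=3,
    )
print(cond_embeds.shape)  # (1, sequence_length, hidden_size), e.g. (1, 77, 768) for short prompts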
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def preprocess_image(image, batch_size):
|
| 371 |
+
w, h = image.size
|
| 372 |
+
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
|
| 373 |
+
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
| 374 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 375 |
+
image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
|
| 376 |
+
image = torch.from_numpy(image)
|
| 377 |
+
return 2.0 * image - 1.0
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def preprocess_mask(mask, batch_size, scale_factor=8):
|
| 381 |
+
if not isinstance(mask, torch.FloatTensor):
|
| 382 |
+
mask = mask.convert("L")
|
| 383 |
+
w, h = mask.size
|
| 384 |
+
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
|
| 385 |
+
mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
|
| 386 |
+
mask = np.array(mask).astype(np.float32) / 255.0
|
| 387 |
+
mask = np.tile(mask, (4, 1, 1))
|
| 388 |
+
mask = np.vstack([mask[None]] * batch_size)
|
| 389 |
+
mask = 1 - mask # repaint white, keep black
|
| 390 |
+
mask = torch.from_numpy(mask)
|
| 391 |
+
return mask
|
| 392 |
+
|
| 393 |
+
else:
|
| 394 |
+
valid_mask_channel_sizes = [1, 3]
|
| 395 |
+
# if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
|
| 396 |
+
if mask.shape[3] in valid_mask_channel_sizes:
|
| 397 |
+
mask = mask.permute(0, 3, 1, 2)
|
| 398 |
+
elif mask.shape[1] not in valid_mask_channel_sizes:
|
| 399 |
+
raise ValueError(
|
| 400 |
+
f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
|
| 401 |
+
f" but received mask of shape {tuple(mask.shape)}"
|
| 402 |
+
)
|
| 403 |
+
# (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
|
| 404 |
+
mask = mask.mean(dim=1, keepdim=True)
|
| 405 |
+
h, w = mask.shape[-2:]
|
| 406 |
+
h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
|
| 407 |
+
mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
|
| 408 |
+
return mask
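# Illustrative sketch (not part of the pipeline): shapes and value ranges produced by the two
# helpers above for PIL inputs; the 512x512 size is an arbitrary example.
from PIL import Image

init_image = Image.new("RGB", (512, 512))
mask_image = Image.new("L", (512, 512))  # black = keep, white = repaint

image_tensor = preprocess_image(init_image, batch_size=1)  # shape (1, 3, 512, 512), values in [-1, 1]
mask_tensor = preprocess_mask(mask_image, batch_size=1)    # shape (1, 4, 64, 64), 1 = keep, 0 = repaint
print(image_tensor.shape, mask_tensor.shape)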
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
class StableDiffusionLongPromptWeightingPipeline(
|
| 412 |
+
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
|
| 413 |
+
):
|
| 414 |
+
r"""
|
| 415 |
+
Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for parsing
|
| 416 |
+
weighting in prompts.
|
| 417 |
+
|
| 418 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 419 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 420 |
+
|
| 421 |
+
Args:
|
| 422 |
+
vae ([`AutoencoderKL`]):
|
| 423 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 424 |
+
text_encoder ([`CLIPTextModel`]):
|
| 425 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 426 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 427 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 428 |
+
tokenizer (`CLIPTokenizer`):
|
| 429 |
+
Tokenizer of class
|
| 430 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 431 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 432 |
+
scheduler ([`SchedulerMixin`]):
|
| 433 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 434 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 435 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 436 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 437 |
+
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
|
| 438 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 439 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 440 |
+
"""
|
| 441 |
+
|
| 442 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 443 |
+
|
| 444 |
+
def __init__(
|
| 445 |
+
self,
|
| 446 |
+
vae: AutoencoderKL,
|
| 447 |
+
text_encoder: CLIPTextModel,
|
| 448 |
+
tokenizer: CLIPTokenizer,
|
| 449 |
+
unet: UNet2DConditionModel,
|
| 450 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 451 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 452 |
+
feature_extractor: CLIPImageProcessor,
|
| 453 |
+
requires_safety_checker: bool = True,
|
| 454 |
+
):
|
| 455 |
+
super().__init__()
|
| 456 |
+
|
| 457 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 458 |
+
deprecation_message = (
|
| 459 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 460 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 461 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 462 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 463 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 464 |
+
" file"
|
| 465 |
+
)
|
| 466 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 467 |
+
new_config = dict(scheduler.config)
|
| 468 |
+
new_config["steps_offset"] = 1
|
| 469 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 470 |
+
|
| 471 |
+
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
| 472 |
+
deprecation_message = (
|
| 473 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 474 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 475 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 476 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 477 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 478 |
+
)
|
| 479 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 480 |
+
new_config = dict(scheduler.config)
|
| 481 |
+
new_config["clip_sample"] = False
|
| 482 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 483 |
+
|
| 484 |
+
if safety_checker is None and requires_safety_checker:
|
| 485 |
+
logger.warning(
|
| 486 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 487 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 488 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 489 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 490 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 491 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 492 |
+
)
|
| 493 |
+
|
| 494 |
+
if safety_checker is not None and feature_extractor is None:
|
| 495 |
+
raise ValueError(
|
| 496 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 497 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 498 |
+
)
|
| 499 |
+
|
| 500 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 501 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 502 |
+
) < version.parse("0.9.0.dev0")
|
| 503 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 504 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 505 |
+
deprecation_message = (
|
| 506 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 507 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 508 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 509 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 510 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 511 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 512 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 513 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 514 |
+
" the `unet/config.json` file"
|
| 515 |
+
)
|
| 516 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 517 |
+
new_config = dict(unet.config)
|
| 518 |
+
new_config["sample_size"] = 64
|
| 519 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 520 |
+
self.register_modules(
|
| 521 |
+
vae=vae,
|
| 522 |
+
text_encoder=text_encoder,
|
| 523 |
+
tokenizer=tokenizer,
|
| 524 |
+
unet=unet,
|
| 525 |
+
scheduler=scheduler,
|
| 526 |
+
safety_checker=safety_checker,
|
| 527 |
+
feature_extractor=feature_extractor,
|
| 528 |
+
)
|
| 529 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 530 |
+
|
| 531 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 532 |
+
self.register_to_config(
|
| 533 |
+
requires_safety_checker=requires_safety_checker,
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
def _encode_prompt(
|
| 537 |
+
self,
|
| 538 |
+
prompt,
|
| 539 |
+
device,
|
| 540 |
+
num_images_per_prompt,
|
| 541 |
+
do_classifier_free_guidance,
|
| 542 |
+
negative_prompt=None,
|
| 543 |
+
max_embeddings_multiples=3,
|
| 544 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 545 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 546 |
+
):
|
| 547 |
+
r"""
|
| 548 |
+
Encodes the prompt into text encoder hidden states.
|
| 549 |
+
|
| 550 |
+
Args:
|
| 551 |
+
prompt (`str` or `list(int)`):
|
| 552 |
+
prompt to be encoded
|
| 553 |
+
device: (`torch.device`):
|
| 554 |
+
torch device
|
| 555 |
+
num_images_per_prompt (`int`):
|
| 556 |
+
number of images that should be generated per prompt
|
| 557 |
+
do_classifier_free_guidance (`bool`):
|
| 558 |
+
whether to use classifier free guidance or not
|
| 559 |
+
negative_prompt (`str` or `List[str]`):
|
| 560 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 561 |
+
if `guidance_scale` is less than `1`).
|
| 562 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 563 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 564 |
+
"""
|
| 565 |
+
if prompt is not None and isinstance(prompt, str):
|
| 566 |
+
batch_size = 1
|
| 567 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 568 |
+
batch_size = len(prompt)
|
| 569 |
+
else:
|
| 570 |
+
batch_size = prompt_embeds.shape[0]
|
| 571 |
+
|
| 572 |
+
if negative_prompt_embeds is None:
|
| 573 |
+
if negative_prompt is None:
|
| 574 |
+
negative_prompt = [""] * batch_size
|
| 575 |
+
elif isinstance(negative_prompt, str):
|
| 576 |
+
negative_prompt = [negative_prompt] * batch_size
|
| 577 |
+
if batch_size != len(negative_prompt):
|
| 578 |
+
raise ValueError(
|
| 579 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 580 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 581 |
+
" the batch size of `prompt`."
|
| 582 |
+
)
|
| 583 |
+
if prompt_embeds is None or negative_prompt_embeds is None:
|
| 584 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 585 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 586 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 587 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
|
| 588 |
+
|
| 589 |
+
prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
|
| 590 |
+
pipe=self,
|
| 591 |
+
prompt=prompt,
|
| 592 |
+
uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
|
| 593 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 594 |
+
)
|
| 595 |
+
if prompt_embeds is None:
|
| 596 |
+
prompt_embeds = prompt_embeds1
|
| 597 |
+
if negative_prompt_embeds is None:
|
| 598 |
+
negative_prompt_embeds = negative_prompt_embeds1
|
| 599 |
+
|
| 600 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 601 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 602 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 603 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 604 |
+
|
| 605 |
+
if do_classifier_free_guidance:
|
| 606 |
+
bs_embed, seq_len, _ = negative_prompt_embeds.shape
|
| 607 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 608 |
+
negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 609 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 610 |
+
|
| 611 |
+
return prompt_embeds
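# Illustrative sketch (not part of the pipeline): with classifier-free guidance the method above
# returns the unconditional and conditional embeddings stacked along the batch dimension, in that
# order, which is why the UNet prediction is later split with `.chunk(2)`. Shapes below assume
# num_images_per_prompt=2 and a 77-token, 768-dim text encoder.
import torch

negative = torch.zeros(2, 77, 768)  # stand-in for the duplicated negative_prompt_embeds
positive = torch.ones(2, 77, 768)   # stand-in for the duplicated prompt_embeds
stacked = torch.cat([negative, positive])
print(stacked.shape)                # torch.Size([4, 77, 768])
uncond, cond = stacked.chunk(2)     # recovers the two halves in the same order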
|
| 612 |
+
|
| 613 |
+
def check_inputs(
|
| 614 |
+
self,
|
| 615 |
+
prompt,
|
| 616 |
+
height,
|
| 617 |
+
width,
|
| 618 |
+
strength,
|
| 619 |
+
callback_steps,
|
| 620 |
+
negative_prompt=None,
|
| 621 |
+
prompt_embeds=None,
|
| 622 |
+
negative_prompt_embeds=None,
|
| 623 |
+
):
|
| 624 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 625 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 626 |
+
|
| 627 |
+
if strength < 0 or strength > 1:
|
| 628 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 629 |
+
|
| 630 |
+
if (callback_steps is None) or (
|
| 631 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 632 |
+
):
|
| 633 |
+
raise ValueError(
|
| 634 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 635 |
+
f" {type(callback_steps)}."
|
| 636 |
+
)
|
| 637 |
+
|
| 638 |
+
if prompt is not None and prompt_embeds is not None:
|
| 639 |
+
raise ValueError(
|
| 640 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 641 |
+
" only forward one of the two."
|
| 642 |
+
)
|
| 643 |
+
elif prompt is None and prompt_embeds is None:
|
| 644 |
+
raise ValueError(
|
| 645 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 646 |
+
)
|
| 647 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 648 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 649 |
+
|
| 650 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 651 |
+
raise ValueError(
|
| 652 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 653 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 654 |
+
)
|
| 655 |
+
|
| 656 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 657 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 658 |
+
raise ValueError(
|
| 659 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 660 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 661 |
+
f" {negative_prompt_embeds.shape}."
|
| 662 |
+
)
|
| 663 |
+
|
| 664 |
+
def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
|
| 665 |
+
if is_text2img:
|
| 666 |
+
return self.scheduler.timesteps.to(device), num_inference_steps
|
| 667 |
+
else:
|
| 668 |
+
# get the original timestep using init_timestep
|
| 669 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 670 |
+
|
| 671 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 672 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 673 |
+
|
| 674 |
+
return timesteps, num_inference_steps - t_start
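# Illustrative sketch (not part of the pipeline): how `strength` trims the schedule for the
# img2img/inpaint branch above, assuming a scheduler with order == 1.
num_inference_steps = 50
strength = 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
print(num_inference_steps - t_start)  # 40 denoising steps actually run, starting from noised latents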
|
| 675 |
+
|
| 676 |
+
def run_safety_checker(self, image, device, dtype):
|
| 677 |
+
if self.safety_checker is not None:
|
| 678 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
|
| 679 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 680 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 681 |
+
)
|
| 682 |
+
else:
|
| 683 |
+
has_nsfw_concept = None
|
| 684 |
+
return image, has_nsfw_concept
|
| 685 |
+
|
| 686 |
+
def decode_latents(self, latents):
|
| 687 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 688 |
+
image = self.vae.decode(latents).sample
|
| 689 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 690 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 691 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 692 |
+
return image
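# Illustrative sketch (not part of the pipeline): the two rescalings performed above, using
# 0.18215 (the usual SD 1.x VAE scaling factor) as an example value and a random stand-in for
# vae.decode(...).sample.
import torch

scaling_factor = 0.18215
latents = torch.randn(1, 4, 64, 64) / scaling_factor          # same as 1 / scaling_factor * latents
decoded = torch.empty(1, 3, 512, 512).uniform_(-1.0, 1.0)     # stand-in for the decoded sample
image = (decoded / 2 + 0.5).clamp(0, 1)                       # [-1, 1] -> [0, 1] before numpy/PIL
print(image.shape, float(image.min()) >= 0.0, float(image.max()) <= 1.0)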
|
| 693 |
+
|
| 694 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 695 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 696 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 697 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 698 |
+
# and should be between [0, 1]
|
| 699 |
+
|
| 700 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 701 |
+
extra_step_kwargs = {}
|
| 702 |
+
if accepts_eta:
|
| 703 |
+
extra_step_kwargs["eta"] = eta
|
| 704 |
+
|
| 705 |
+
# check if the scheduler accepts generator
|
| 706 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 707 |
+
if accepts_generator:
|
| 708 |
+
extra_step_kwargs["generator"] = generator
|
| 709 |
+
return extra_step_kwargs
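# Illustrative sketch (not part of the pipeline): the signature introspection used above, shown
# on a plain function. Only keyword names that exist on the scheduler's `step` are forwarded,
# so e.g. DDIM receives `eta` while schedulers without that argument are left untouched.
import inspect

def fake_step(model_output, timestep, sample, eta=0.0, generator=None):
    return sample

accepts_eta = "eta" in set(inspect.signature(fake_step).parameters.keys())
extra_step_kwargs = {"eta": 0.0} if accepts_eta else {}
print(accepts_eta, extra_step_kwargs)  # True {'eta': 0.0}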
|
| 710 |
+
|
| 711 |
+
def prepare_latents(
|
| 712 |
+
self,
|
| 713 |
+
image,
|
| 714 |
+
timestep,
|
| 715 |
+
num_images_per_prompt,
|
| 716 |
+
batch_size,
|
| 717 |
+
num_channels_latents,
|
| 718 |
+
height,
|
| 719 |
+
width,
|
| 720 |
+
dtype,
|
| 721 |
+
device,
|
| 722 |
+
generator,
|
| 723 |
+
latents=None,
|
| 724 |
+
):
|
| 725 |
+
if image is None:
|
| 726 |
+
batch_size = batch_size * num_images_per_prompt
|
| 727 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 728 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 729 |
+
raise ValueError(
|
| 730 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 731 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 732 |
+
)
|
| 733 |
+
|
| 734 |
+
if latents is None:
|
| 735 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 736 |
+
else:
|
| 737 |
+
latents = latents.to(device)
|
| 738 |
+
|
| 739 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 740 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 741 |
+
return latents, None, None
|
| 742 |
+
else:
|
| 743 |
+
image = image.to(device=self.device, dtype=dtype)
|
| 744 |
+
init_latent_dist = self.vae.encode(image).latent_dist
|
| 745 |
+
init_latents = init_latent_dist.sample(generator=generator)
|
| 746 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 747 |
+
|
| 748 |
+
# Expand init_latents for batch_size and num_images_per_prompt
|
| 749 |
+
init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
|
| 750 |
+
init_latents_orig = init_latents
|
| 751 |
+
|
| 752 |
+
# add noise to latents using the timesteps
|
| 753 |
+
noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
|
| 754 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 755 |
+
latents = init_latents
|
| 756 |
+
return latents, init_latents_orig, noise
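# Illustrative sketch (not part of the pipeline): latent shapes in the text-to-image branch
# above, assuming the default 512x512 output, 4 latent channels and vae_scale_factor == 8.
batch_size, num_images_per_prompt = 1, 2
num_channels_latents, height, width, vae_scale_factor = 4, 512, 512, 8
shape = (
    batch_size * num_images_per_prompt,
    num_channels_latents,
    height // vae_scale_factor,
    width // vae_scale_factor,
)
print(shape)  # (2, 4, 64, 64)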
|
| 757 |
+
|
| 758 |
+
@torch.no_grad()
|
| 759 |
+
def __call__(
|
| 760 |
+
self,
|
| 761 |
+
prompt: Union[str, List[str]],
|
| 762 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 763 |
+
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 764 |
+
mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 765 |
+
height: int = 512,
|
| 766 |
+
width: int = 512,
|
| 767 |
+
num_inference_steps: int = 50,
|
| 768 |
+
guidance_scale: float = 7.5,
|
| 769 |
+
strength: float = 0.8,
|
| 770 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 771 |
+
add_predicted_noise: Optional[bool] = False,
|
| 772 |
+
eta: float = 0.0,
|
| 773 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 774 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 775 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 776 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 777 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 778 |
+
output_type: Optional[str] = "pil",
|
| 779 |
+
return_dict: bool = True,
|
| 780 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 781 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 782 |
+
callback_steps: int = 1,
|
| 783 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 784 |
+
):
|
| 785 |
+
r"""
|
| 786 |
+
Function invoked when calling the pipeline for generation.
|
| 787 |
+
|
| 788 |
+
Args:
|
| 789 |
+
prompt (`str` or `List[str]`):
|
| 790 |
+
The prompt or prompts to guide the image generation.
|
| 791 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 792 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 793 |
+
if `guidance_scale` is less than `1`).
|
| 794 |
+
image (`torch.FloatTensor` or `PIL.Image.Image`):
|
| 795 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 796 |
+
process.
|
| 797 |
+
mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
|
| 798 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 799 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 800 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 801 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 802 |
+
height (`int`, *optional*, defaults to 512):
|
| 803 |
+
The height in pixels of the generated image.
|
| 804 |
+
width (`int`, *optional*, defaults to 512):
|
| 805 |
+
The width in pixels of the generated image.
|
| 806 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 807 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 808 |
+
expense of slower inference.
|
| 809 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 810 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 811 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 812 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 813 |
+
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 814 |
+
usually at the expense of lower image quality.
|
| 815 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 816 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 817 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 818 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 819 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 820 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 821 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 822 |
+
The number of images to generate per prompt.
|
| 823 |
+
add_predicted_noise (`bool`, *optional*, defaults to `False`):
|
| 824 |
+
Use predicted noise instead of random noise when constructing noisy versions of the original image in
|
| 825 |
+
the reverse diffusion process
|
| 826 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 827 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 828 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 829 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 830 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 831 |
+
to make generation deterministic.
|
| 832 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 833 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 834 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 835 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 836 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 837 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 838 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 839 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 840 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 841 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 842 |
+
argument.
|
| 843 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 844 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 845 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 846 |
+
The output format of the generated image. Choose between
|
| 847 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 848 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 849 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 850 |
+
plain tuple.
|
| 851 |
+
callback (`Callable`, *optional*):
|
| 852 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 853 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 854 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 855 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 856 |
+
`True`, the inference will be cancelled.
|
| 857 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 858 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 859 |
+
called at every step.
|
| 860 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 861 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 862 |
+
`self.processor` in
|
| 863 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 864 |
+
|
| 865 |
+
Returns:
|
| 866 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 867 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 868 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 869 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 870 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 871 |
+
(nsfw) content, according to the `safety_checker`.
|
| 872 |
+
"""
|
| 873 |
+
# 0. Default height and width to unet
|
| 874 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 875 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 876 |
+
|
| 877 |
+
# 1. Check inputs. Raise error if not correct
|
| 878 |
+
self.check_inputs(
|
| 879 |
+
prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
| 880 |
+
)
|
| 881 |
+
|
| 882 |
+
# 2. Define call parameters
|
| 883 |
+
if prompt is not None and isinstance(prompt, str):
|
| 884 |
+
batch_size = 1
|
| 885 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 886 |
+
batch_size = len(prompt)
|
| 887 |
+
else:
|
| 888 |
+
batch_size = prompt_embeds.shape[0]
|
| 889 |
+
|
| 890 |
+
device = self._execution_device
|
| 891 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 892 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 893 |
+
# corresponds to doing no classifier free guidance.
|
| 894 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 895 |
+
|
| 896 |
+
# 3. Encode input prompt
|
| 897 |
+
prompt_embeds = self._encode_prompt(
|
| 898 |
+
prompt,
|
| 899 |
+
device,
|
| 900 |
+
num_images_per_prompt,
|
| 901 |
+
do_classifier_free_guidance,
|
| 902 |
+
negative_prompt,
|
| 903 |
+
max_embeddings_multiples,
|
| 904 |
+
prompt_embeds=prompt_embeds,
|
| 905 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 906 |
+
)
|
| 907 |
+
dtype = prompt_embeds.dtype
|
| 908 |
+
|
| 909 |
+
# 4. Preprocess image and mask
|
| 910 |
+
if isinstance(image, PIL.Image.Image):
|
| 911 |
+
image = preprocess_image(image, batch_size)
|
| 912 |
+
if image is not None:
|
| 913 |
+
image = image.to(device=self.device, dtype=dtype)
|
| 914 |
+
if isinstance(mask_image, PIL.Image.Image):
|
| 915 |
+
mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
|
| 916 |
+
if mask_image is not None:
|
| 917 |
+
mask = mask_image.to(device=self.device, dtype=dtype)
|
| 918 |
+
mask = torch.cat([mask] * num_images_per_prompt)
|
| 919 |
+
else:
|
| 920 |
+
mask = None
|
| 921 |
+
|
| 922 |
+
# 5. set timesteps
|
| 923 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 924 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
|
| 925 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 926 |
+
|
| 927 |
+
# 6. Prepare latent variables
|
| 928 |
+
latents, init_latents_orig, noise = self.prepare_latents(
|
| 929 |
+
image,
|
| 930 |
+
latent_timestep,
|
| 931 |
+
num_images_per_prompt,
|
| 932 |
+
batch_size,
|
| 933 |
+
self.unet.config.in_channels,
|
| 934 |
+
height,
|
| 935 |
+
width,
|
| 936 |
+
dtype,
|
| 937 |
+
device,
|
| 938 |
+
generator,
|
| 939 |
+
latents,
|
| 940 |
+
)
|
| 941 |
+
|
| 942 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 943 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 944 |
+
|
| 945 |
+
# 8. Denoising loop
|
| 946 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 947 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 948 |
+
for i, t in enumerate(timesteps):
|
| 949 |
+
# expand the latents if we are doing classifier free guidance
|
| 950 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 951 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 952 |
+
|
| 953 |
+
# predict the noise residual
|
| 954 |
+
noise_pred = self.unet(
|
| 955 |
+
latent_model_input,
|
| 956 |
+
t,
|
| 957 |
+
encoder_hidden_states=prompt_embeds,
|
| 958 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 959 |
+
).sample
|
| 960 |
+
|
| 961 |
+
# perform guidance
|
| 962 |
+
if do_classifier_free_guidance:
|
| 963 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 964 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 965 |
+
|
| 966 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 967 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 968 |
+
|
| 969 |
+
if mask is not None:
|
| 970 |
+
# masking
|
| 971 |
+
if add_predicted_noise:
|
| 972 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 973 |
+
init_latents_orig, noise_pred_uncond, torch.tensor([t])
|
| 974 |
+
)
|
| 975 |
+
else:
|
| 976 |
+
init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
|
| 977 |
+
latents = (init_latents_proper * mask) + (latents * (1 - mask))
|
| 978 |
+
|
| 979 |
+
# call the callback, if provided
|
| 980 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 981 |
+
progress_bar.update()
|
| 982 |
+
if i % callback_steps == 0:
|
| 983 |
+
if callback is not None:
|
| 984 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 985 |
+
callback(step_idx, t, latents)
|
| 986 |
+
if is_cancelled_callback is not None and is_cancelled_callback():
|
| 987 |
+
return None
|
| 988 |
+
|
| 989 |
+
if output_type == "latent":
|
| 990 |
+
image = latents
|
| 991 |
+
has_nsfw_concept = None
|
| 992 |
+
elif output_type == "pil":
|
| 993 |
+
# 9. Post-processing
|
| 994 |
+
image = self.decode_latents(latents)
|
| 995 |
+
|
| 996 |
+
# 10. Run safety checker
|
| 997 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 998 |
+
|
| 999 |
+
# 11. Convert to PIL
|
| 1000 |
+
image = self.numpy_to_pil(image)
|
| 1001 |
+
else:
|
| 1002 |
+
# 9. Post-processing
|
| 1003 |
+
image = self.decode_latents(latents)
|
| 1004 |
+
|
| 1005 |
+
# 10. Run safety checker
|
| 1006 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1007 |
+
|
| 1008 |
+
# Offload last model to CPU
|
| 1009 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1010 |
+
self.final_offload_hook.offload()
|
| 1011 |
+
|
| 1012 |
+
if not return_dict:
|
| 1013 |
+
return image, has_nsfw_concept
|
| 1014 |
+
|
| 1015 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
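# Illustrative usage sketch. Assumptions: "lpw_stable_diffusion" is the community pipeline id for
# this file and "runwayml/stable-diffusion-v1-5" is just an example checkpoint; any SD 1.x
# checkpoint should work, and a CUDA device is assumed.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# text-to-image with a weighted prompt; prompts longer than 77 tokens are handled via chunking
result = pipe.text2img(
    "a (very beautiful) painting of a quiet harbour at dawn, highly detailed",
    negative_prompt="lowres, blurry",
    max_embeddings_multiples=3,
    num_inference_steps=30,
)
result.images[0].save("harbour.png")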
|
| 1016 |
+
|
| 1017 |
+
def text2img(
|
| 1018 |
+
self,
|
| 1019 |
+
prompt: Union[str, List[str]],
|
| 1020 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1021 |
+
height: int = 512,
|
| 1022 |
+
width: int = 512,
|
| 1023 |
+
num_inference_steps: int = 50,
|
| 1024 |
+
guidance_scale: float = 7.5,
|
| 1025 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1026 |
+
eta: float = 0.0,
|
| 1027 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1028 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 1029 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1030 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1031 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1032 |
+
output_type: Optional[str] = "pil",
|
| 1033 |
+
return_dict: bool = True,
|
| 1034 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 1035 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 1036 |
+
callback_steps: int = 1,
|
| 1037 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1038 |
+
):
|
| 1039 |
+
r"""
|
| 1040 |
+
Function for text-to-image generation.
|
| 1041 |
+
Args:
|
| 1042 |
+
prompt (`str` or `List[str]`):
|
| 1043 |
+
The prompt or prompts to guide the image generation.
|
| 1044 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1045 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1046 |
+
if `guidance_scale` is less than `1`).
|
| 1047 |
+
height (`int`, *optional*, defaults to 512):
|
| 1048 |
+
The height in pixels of the generated image.
|
| 1049 |
+
width (`int`, *optional*, defaults to 512):
|
| 1050 |
+
The width in pixels of the generated image.
|
| 1051 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1052 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1053 |
+
expense of slower inference.
|
| 1054 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1055 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 1056 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1057 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 1058 |
+
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 1059 |
+
usually at the expense of lower image quality.
|
| 1060 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1061 |
+
The number of images to generate per prompt.
|
| 1062 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1063 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 1064 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1065 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1066 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1067 |
+
to make generation deterministic.
|
| 1068 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 1069 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 1070 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 1071 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 1072 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1073 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1074 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1075 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1076 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1077 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1078 |
+
argument.
|
| 1079 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1080 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1081 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1082 |
+
The output format of the generated image. Choose between
|
| 1083 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1084 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1085 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1086 |
+
plain tuple.
|
| 1087 |
+
callback (`Callable`, *optional*):
|
| 1088 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1089 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 1090 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 1091 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 1092 |
+
`True`, the inference will be cancelled.
|
| 1093 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1094 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1095 |
+
called at every step.
|
| 1096 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1097 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1098 |
+
`self.processor` in
|
| 1099 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1100 |
+
|
| 1101 |
+
Returns:
|
| 1102 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 1103 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1104 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1105 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1106 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1107 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1108 |
+
"""
|
| 1109 |
+
return self.__call__(
|
| 1110 |
+
prompt=prompt,
|
| 1111 |
+
negative_prompt=negative_prompt,
|
| 1112 |
+
height=height,
|
| 1113 |
+
width=width,
|
| 1114 |
+
num_inference_steps=num_inference_steps,
|
| 1115 |
+
guidance_scale=guidance_scale,
|
| 1116 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1117 |
+
eta=eta,
|
| 1118 |
+
generator=generator,
|
| 1119 |
+
latents=latents,
|
| 1120 |
+
prompt_embeds=prompt_embeds,
|
| 1121 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1122 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1123 |
+
output_type=output_type,
|
| 1124 |
+
return_dict=return_dict,
|
| 1125 |
+
callback=callback,
|
| 1126 |
+
is_cancelled_callback=is_cancelled_callback,
|
| 1127 |
+
callback_steps=callback_steps,
|
| 1128 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1129 |
+
)
|
| 1130 |
+
|
| 1131 |
+
def img2img(
|
| 1132 |
+
self,
|
| 1133 |
+
image: Union[torch.FloatTensor, PIL.Image.Image],
|
| 1134 |
+
prompt: Union[str, List[str]],
|
| 1135 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1136 |
+
strength: float = 0.8,
|
| 1137 |
+
num_inference_steps: Optional[int] = 50,
|
| 1138 |
+
guidance_scale: Optional[float] = 7.5,
|
| 1139 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1140 |
+
eta: Optional[float] = 0.0,
|
| 1141 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1142 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1143 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1144 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1145 |
+
output_type: Optional[str] = "pil",
|
| 1146 |
+
return_dict: bool = True,
|
| 1147 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 1148 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 1149 |
+
callback_steps: int = 1,
|
| 1150 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1151 |
+
):
|
| 1152 |
+
r"""
|
| 1153 |
+
Function for image-to-image generation.
|
| 1154 |
+
Args:
|
| 1155 |
+
image (`torch.FloatTensor` or `PIL.Image.Image`):
|
| 1156 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 1157 |
+
process.
|
| 1158 |
+
prompt (`str` or `List[str]`):
|
| 1159 |
+
The prompt or prompts to guide the image generation.
|
| 1160 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1161 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1162 |
+
if `guidance_scale` is less than `1`).
|
| 1163 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 1164 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 1165 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 1166 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 1167 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 1168 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 1169 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1170 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1171 |
+
expense of slower inference. This parameter will be modulated by `strength`.
|
| 1172 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1173 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 1174 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1175 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 1176 |
+
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 1177 |
+
usually at the expense of lower image quality.
|
| 1178 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1179 |
+
The number of images to generate per prompt.
|
| 1180 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1181 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 1182 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1183 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1184 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1185 |
+
to make generation deterministic.
|
| 1186 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1187 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1188 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1189 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1190 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1191 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1192 |
+
argument.
|
| 1193 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1194 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1195 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1196 |
+
The output format of the generated image. Choose between
|
| 1197 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1198 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1199 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1200 |
+
plain tuple.
|
| 1201 |
+
callback (`Callable`, *optional*):
|
| 1202 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1203 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 1204 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 1205 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 1206 |
+
`True`, the inference will be cancelled.
|
| 1207 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1208 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1209 |
+
called at every step.
|
| 1210 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1211 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1212 |
+
`self.processor` in
|
| 1213 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1214 |
+
|
| 1215 |
+
Returns:
|
| 1216 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 1217 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1218 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1219 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1220 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1221 |
+
"""
|
| 1222 |
+
return self.__call__(
|
| 1223 |
+
prompt=prompt,
|
| 1224 |
+
negative_prompt=negative_prompt,
|
| 1225 |
+
image=image,
|
| 1226 |
+
num_inference_steps=num_inference_steps,
|
| 1227 |
+
guidance_scale=guidance_scale,
|
| 1228 |
+
strength=strength,
|
| 1229 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1230 |
+
eta=eta,
|
| 1231 |
+
generator=generator,
|
| 1232 |
+
prompt_embeds=prompt_embeds,
|
| 1233 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1234 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1235 |
+
output_type=output_type,
|
| 1236 |
+
return_dict=return_dict,
|
| 1237 |
+
callback=callback,
|
| 1238 |
+
is_cancelled_callback=is_cancelled_callback,
|
| 1239 |
+
callback_steps=callback_steps,
|
| 1240 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1241 |
+
)
|
| 1242 |
+
|
| 1243 |
+
def inpaint(
|
| 1244 |
+
self,
|
| 1245 |
+
image: Union[torch.FloatTensor, PIL.Image.Image],
|
| 1246 |
+
mask_image: Union[torch.FloatTensor, PIL.Image.Image],
|
| 1247 |
+
prompt: Union[str, List[str]],
|
| 1248 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1249 |
+
strength: float = 0.8,
|
| 1250 |
+
num_inference_steps: Optional[int] = 50,
|
| 1251 |
+
guidance_scale: Optional[float] = 7.5,
|
| 1252 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1253 |
+
add_predicted_noise: Optional[bool] = False,
|
| 1254 |
+
eta: Optional[float] = 0.0,
|
| 1255 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1256 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1257 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1258 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1259 |
+
output_type: Optional[str] = "pil",
|
| 1260 |
+
return_dict: bool = True,
|
| 1261 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 1262 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 1263 |
+
callback_steps: int = 1,
|
| 1264 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1265 |
+
):
|
| 1266 |
+
r"""
|
| 1267 |
+
Function for inpainting.
|
| 1268 |
+
Args:
|
| 1269 |
+
image (`torch.FloatTensor` or `PIL.Image.Image`):
|
| 1270 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 1271 |
+
process. This is the image whose masked region will be inpainted.
|
| 1272 |
+
mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
|
| 1273 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 1274 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 1275 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 1276 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 1277 |
+
prompt (`str` or `List[str]`):
|
| 1278 |
+
The prompt or prompts to guide the image generation.
|
| 1279 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1280 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1281 |
+
if `guidance_scale` is less than `1`).
|
| 1282 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 1283 |
+
Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
|
| 1284 |
+
is 1, the denoising process will be run on the masked area for the full number of iterations specified
|
| 1285 |
+
in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
|
| 1286 |
+
noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
|
| 1287 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1288 |
+
The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
|
| 1289 |
+
the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
|
| 1290 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1291 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 1292 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1293 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 1294 |
+
1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
|
| 1295 |
+
usually at the expense of lower image quality.
|
| 1296 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1297 |
+
The number of images to generate per prompt.
|
| 1298 |
+
add_predicted_noise (`bool`, *optional*, defaults to `False`):
|
| 1299 |
+
Use predicted noise instead of random noise when constructing noisy versions of the original image in
|
| 1300 |
+
the reverse diffusion process
|
| 1301 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1302 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 1303 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1304 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1305 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1306 |
+
to make generation deterministic.
|
| 1307 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1308 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1309 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1310 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1311 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1312 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1313 |
+
argument.
|
| 1314 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1315 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1316 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1317 |
+
The output format of the generated image. Choose between
|
| 1318 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1319 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1320 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1321 |
+
plain tuple.
|
| 1322 |
+
callback (`Callable`, *optional*):
|
| 1323 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1324 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 1325 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 1326 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 1327 |
+
`True`, the inference will be cancelled.
|
| 1328 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1329 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1330 |
+
called at every step.
|
| 1331 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1332 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1333 |
+
`self.processor` in
|
| 1334 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1335 |
+
|
| 1336 |
+
Returns:
|
| 1337 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 1338 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1339 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1340 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1341 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1342 |
+
"""
|
| 1343 |
+
return self.__call__(
|
| 1344 |
+
prompt=prompt,
|
| 1345 |
+
negative_prompt=negative_prompt,
|
| 1346 |
+
image=image,
|
| 1347 |
+
mask_image=mask_image,
|
| 1348 |
+
num_inference_steps=num_inference_steps,
|
| 1349 |
+
guidance_scale=guidance_scale,
|
| 1350 |
+
strength=strength,
|
| 1351 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1352 |
+
add_predicted_noise=add_predicted_noise,
|
| 1353 |
+
eta=eta,
|
| 1354 |
+
generator=generator,
|
| 1355 |
+
prompt_embeds=prompt_embeds,
|
| 1356 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1357 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1358 |
+
output_type=output_type,
|
| 1359 |
+
return_dict=return_dict,
|
| 1360 |
+
callback=callback,
|
| 1361 |
+
is_cancelled_callback=is_cancelled_callback,
|
| 1362 |
+
callback_steps=callback_steps,
|
| 1363 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1364 |
+
)
|
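The `inpaint` helper above only forwards its arguments to `__call__` with `mask_image` set. A minimal usage sketch, assuming the pipeline is loaded as the `lpw_stable_diffusion` community pipeline; the model id and the `init.png`/`mask.png` files are placeholders, not part of the diff:

import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Load the long-prompt-weighting community pipeline (model id is illustrative).
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

init_image = Image.open("init.png").convert("RGB").resize((512, 512))
mask_image = Image.open("mask.png").convert("L").resize((512, 512))

# Weighted prompt syntax: (word) boosts attention by 1.1x, (word:1.3) by 1.3x, [word] lowers it.
result = pipe.inpaint(
    image=init_image,
    mask_image=mask_image,
    prompt="a (photorealistic:1.3) cat sitting on a wooden bench",
    negative_prompt="blurry, low quality",
    strength=0.75,
    num_inference_steps=50,
)
result.images[0].save("inpainted.png")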
v0.27.0/lpw_stable_diffusion_onnx.py
ADDED
|
@@ -0,0 +1,1148 @@
|
| 1 |
+
import inspect
|
| 2 |
+
import re
|
| 3 |
+
from typing import Callable, List, Optional, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import PIL.Image
|
| 7 |
+
import torch
|
| 8 |
+
from packaging import version
|
| 9 |
+
from transformers import CLIPImageProcessor, CLIPTokenizer
|
| 10 |
+
|
| 11 |
+
import diffusers
|
| 12 |
+
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
|
| 13 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 14 |
+
from diffusers.utils import logging
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
try:
|
| 18 |
+
from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE
|
| 19 |
+
except ImportError:
|
| 20 |
+
ORT_TO_NP_TYPE = {
|
| 21 |
+
"tensor(bool)": np.bool_,
|
| 22 |
+
"tensor(int8)": np.int8,
|
| 23 |
+
"tensor(uint8)": np.uint8,
|
| 24 |
+
"tensor(int16)": np.int16,
|
| 25 |
+
"tensor(uint16)": np.uint16,
|
| 26 |
+
"tensor(int32)": np.int32,
|
| 27 |
+
"tensor(uint32)": np.uint32,
|
| 28 |
+
"tensor(int64)": np.int64,
|
| 29 |
+
"tensor(uint64)": np.uint64,
|
| 30 |
+
"tensor(float16)": np.float16,
|
| 31 |
+
"tensor(float)": np.float32,
|
| 32 |
+
"tensor(double)": np.float64,
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
try:
|
| 36 |
+
from diffusers.utils import PIL_INTERPOLATION
|
| 37 |
+
except ImportError:
|
| 38 |
+
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
|
| 39 |
+
PIL_INTERPOLATION = {
|
| 40 |
+
"linear": PIL.Image.Resampling.BILINEAR,
|
| 41 |
+
"bilinear": PIL.Image.Resampling.BILINEAR,
|
| 42 |
+
"bicubic": PIL.Image.Resampling.BICUBIC,
|
| 43 |
+
"lanczos": PIL.Image.Resampling.LANCZOS,
|
| 44 |
+
"nearest": PIL.Image.Resampling.NEAREST,
|
| 45 |
+
}
|
| 46 |
+
else:
|
| 47 |
+
PIL_INTERPOLATION = {
|
| 48 |
+
"linear": PIL.Image.LINEAR,
|
| 49 |
+
"bilinear": PIL.Image.BILINEAR,
|
| 50 |
+
"bicubic": PIL.Image.BICUBIC,
|
| 51 |
+
"lanczos": PIL.Image.LANCZOS,
|
| 52 |
+
"nearest": PIL.Image.NEAREST,
|
| 53 |
+
}
|
| 54 |
+
# ------------------------------------------------------------------------------
|
| 55 |
+
|
| 56 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 57 |
+
|
| 58 |
+
re_attention = re.compile(
|
| 59 |
+
r"""
|
| 60 |
+
\\\(|
|
| 61 |
+
\\\)|
|
| 62 |
+
\\\[|
|
| 63 |
+
\\]|
|
| 64 |
+
\\\\|
|
| 65 |
+
\\|
|
| 66 |
+
\(|
|
| 67 |
+
\[|
|
| 68 |
+
:([+-]?[.\d]+)\)|
|
| 69 |
+
\)|
|
| 70 |
+
]|
|
| 71 |
+
[^\\()\[\]:]+|
|
| 72 |
+
:
|
| 73 |
+
""",
|
| 74 |
+
re.X,
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def parse_prompt_attention(text):
|
| 79 |
+
"""
|
| 80 |
+
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
|
| 81 |
+
Accepted tokens are:
|
| 82 |
+
(abc) - increases attention to abc by a multiplier of 1.1
|
| 83 |
+
(abc:3.12) - increases attention to abc by a multiplier of 3.12
|
| 84 |
+
[abc] - decreases attention to abc by a multiplier of 1.1
|
| 85 |
+
\\( - literal character '('
|
| 86 |
+
\\[ - literal character '['
|
| 87 |
+
\\) - literal character ')'
|
| 88 |
+
\\] - literal character ']'
|
| 89 |
+
\\ - literal character '\'
|
| 90 |
+
anything else - just text
|
| 91 |
+
>>> parse_prompt_attention('normal text')
|
| 92 |
+
[['normal text', 1.0]]
|
| 93 |
+
>>> parse_prompt_attention('an (important) word')
|
| 94 |
+
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
|
| 95 |
+
>>> parse_prompt_attention('(unbalanced')
|
| 96 |
+
[['unbalanced', 1.1]]
|
| 97 |
+
>>> parse_prompt_attention('\\(literal\\]')
|
| 98 |
+
[['(literal]', 1.0]]
|
| 99 |
+
>>> parse_prompt_attention('(unnecessary)(parens)')
|
| 100 |
+
[['unnecessaryparens', 1.1]]
|
| 101 |
+
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
|
| 102 |
+
[['a ', 1.0],
|
| 103 |
+
['house', 1.5730000000000004],
|
| 104 |
+
[' ', 1.1],
|
| 105 |
+
['on', 1.0],
|
| 106 |
+
[' a ', 1.1],
|
| 107 |
+
['hill', 0.55],
|
| 108 |
+
[', sun, ', 1.1],
|
| 109 |
+
['sky', 1.4641000000000006],
|
| 110 |
+
['.', 1.1]]
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
res = []
|
| 114 |
+
round_brackets = []
|
| 115 |
+
square_brackets = []
|
| 116 |
+
|
| 117 |
+
round_bracket_multiplier = 1.1
|
| 118 |
+
square_bracket_multiplier = 1 / 1.1
|
| 119 |
+
|
| 120 |
+
def multiply_range(start_position, multiplier):
|
| 121 |
+
for p in range(start_position, len(res)):
|
| 122 |
+
res[p][1] *= multiplier
|
| 123 |
+
|
| 124 |
+
for m in re_attention.finditer(text):
|
| 125 |
+
text = m.group(0)
|
| 126 |
+
weight = m.group(1)
|
| 127 |
+
|
| 128 |
+
if text.startswith("\\"):
|
| 129 |
+
res.append([text[1:], 1.0])
|
| 130 |
+
elif text == "(":
|
| 131 |
+
round_brackets.append(len(res))
|
| 132 |
+
elif text == "[":
|
| 133 |
+
square_brackets.append(len(res))
|
| 134 |
+
elif weight is not None and len(round_brackets) > 0:
|
| 135 |
+
multiply_range(round_brackets.pop(), float(weight))
|
| 136 |
+
elif text == ")" and len(round_brackets) > 0:
|
| 137 |
+
multiply_range(round_brackets.pop(), round_bracket_multiplier)
|
| 138 |
+
elif text == "]" and len(square_brackets) > 0:
|
| 139 |
+
multiply_range(square_brackets.pop(), square_bracket_multiplier)
|
| 140 |
+
else:
|
| 141 |
+
res.append([text, 1.0])
|
| 142 |
+
|
| 143 |
+
for pos in round_brackets:
|
| 144 |
+
multiply_range(pos, round_bracket_multiplier)
|
| 145 |
+
|
| 146 |
+
for pos in square_brackets:
|
| 147 |
+
multiply_range(pos, square_bracket_multiplier)
|
| 148 |
+
|
| 149 |
+
if len(res) == 0:
|
| 150 |
+
res = [["", 1.0]]
|
| 151 |
+
|
| 152 |
+
# merge runs of identical weights
|
| 153 |
+
i = 0
|
| 154 |
+
while i + 1 < len(res):
|
| 155 |
+
if res[i][1] == res[i + 1][1]:
|
| 156 |
+
res[i][0] += res[i + 1][0]
|
| 157 |
+
res.pop(i + 1)
|
| 158 |
+
else:
|
| 159 |
+
i += 1
|
| 160 |
+
|
| 161 |
+
return res
|
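A small worked example of the weighting arithmetic implemented by `parse_prompt_attention` (outputs shown approximately; each pair of round brackets multiplies the enclosed weight by 1.1, square brackets by 1/1.1, and an explicit `:w` overrides the 1.1 factor of its own bracket):

parse_prompt_attention("a photo of a ((cute)) dog")
# -> [['a photo of a ', 1.0], ['cute', ~1.21], [' dog', 1.0]]   (1.1 * 1.1)

parse_prompt_attention("a (red:1.5) ball in a [dark] room")
# -> [['a ', 1.0], ['red', 1.5], [' ball in a ', 1.0], ['dark', ~0.909], [' room', 1.0]]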
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def get_prompts_with_weights(pipe, prompt: List[str], max_length: int):
|
| 165 |
+
r"""
|
| 166 |
+
Tokenize a list of prompts and return their tokens together with the weight of each token.
|
| 167 |
+
|
| 168 |
+
No padding, starting or ending token is included.
|
| 169 |
+
"""
|
| 170 |
+
tokens = []
|
| 171 |
+
weights = []
|
| 172 |
+
truncated = False
|
| 173 |
+
for text in prompt:
|
| 174 |
+
texts_and_weights = parse_prompt_attention(text)
|
| 175 |
+
text_token = []
|
| 176 |
+
text_weight = []
|
| 177 |
+
for word, weight in texts_and_weights:
|
| 178 |
+
# tokenize and discard the starting and the ending token
|
| 179 |
+
token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1]
|
| 180 |
+
text_token += list(token)
|
| 181 |
+
# copy the weight by length of token
|
| 182 |
+
text_weight += [weight] * len(token)
|
| 183 |
+
# stop if the text is too long (longer than truncation limit)
|
| 184 |
+
if len(text_token) > max_length:
|
| 185 |
+
truncated = True
|
| 186 |
+
break
|
| 187 |
+
# truncate
|
| 188 |
+
if len(text_token) > max_length:
|
| 189 |
+
truncated = True
|
| 190 |
+
text_token = text_token[:max_length]
|
| 191 |
+
text_weight = text_weight[:max_length]
|
| 192 |
+
tokens.append(text_token)
|
| 193 |
+
weights.append(text_weight)
|
| 194 |
+
if truncated:
|
| 195 |
+
logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
|
| 196 |
+
return tokens, weights
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
|
| 200 |
+
r"""
|
| 201 |
+
Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
|
| 202 |
+
"""
|
| 203 |
+
max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
|
| 204 |
+
weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
|
| 205 |
+
for i in range(len(tokens)):
|
| 206 |
+
tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
|
| 207 |
+
if no_boseos_middle:
|
| 208 |
+
weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
|
| 209 |
+
else:
|
| 210 |
+
w = []
|
| 211 |
+
if len(weights[i]) == 0:
|
| 212 |
+
w = [1.0] * weights_length
|
| 213 |
+
else:
|
| 214 |
+
for j in range(max_embeddings_multiples):
|
| 215 |
+
w.append(1.0) # weight for starting token in this chunk
|
| 216 |
+
w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
|
| 217 |
+
w.append(1.0) # weight for ending token in this chunk
|
| 218 |
+
w += [1.0] * (weights_length - len(w))
|
| 219 |
+
weights[i] = w[:]
|
| 220 |
+
|
| 221 |
+
return tokens, weights
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def get_unweighted_text_embeddings(
|
| 225 |
+
pipe,
|
| 226 |
+
text_input: np.array,
|
| 227 |
+
chunk_length: int,
|
| 228 |
+
no_boseos_middle: Optional[bool] = True,
|
| 229 |
+
):
|
| 230 |
+
"""
|
| 231 |
+
When the length of the tokens exceeds the capacity of the text encoder, the input
|
| 232 |
+
is split into chunks and each chunk is sent to the text encoder individually.
|
| 233 |
+
"""
|
| 234 |
+
max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
|
| 235 |
+
if max_embeddings_multiples > 1:
|
| 236 |
+
text_embeddings = []
|
| 237 |
+
for i in range(max_embeddings_multiples):
|
| 238 |
+
# extract the i-th chunk
|
| 239 |
+
text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy()
|
| 240 |
+
|
| 241 |
+
# cover the head and the tail by the starting and the ending tokens
|
| 242 |
+
text_input_chunk[:, 0] = text_input[0, 0]
|
| 243 |
+
text_input_chunk[:, -1] = text_input[0, -1]
|
| 244 |
+
|
| 245 |
+
text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0]
|
| 246 |
+
|
| 247 |
+
if no_boseos_middle:
|
| 248 |
+
if i == 0:
|
| 249 |
+
# discard the ending token
|
| 250 |
+
text_embedding = text_embedding[:, :-1]
|
| 251 |
+
elif i == max_embeddings_multiples - 1:
|
| 252 |
+
# discard the starting token
|
| 253 |
+
text_embedding = text_embedding[:, 1:]
|
| 254 |
+
else:
|
| 255 |
+
# discard both starting and ending tokens
|
| 256 |
+
text_embedding = text_embedding[:, 1:-1]
|
| 257 |
+
|
| 258 |
+
text_embeddings.append(text_embedding)
|
| 259 |
+
text_embeddings = np.concatenate(text_embeddings, axis=1)
|
| 260 |
+
else:
|
| 261 |
+
text_embeddings = pipe.text_encoder(input_ids=text_input)[0]
|
| 262 |
+
return text_embeddings
|
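A quick arithmetic check of the chunking above, assuming a CLIP tokenizer with `model_max_length = 77` and `max_embeddings_multiples = 3` (the pipeline defaults):

chunk_length = 77                                                # CLIP's model_max_length
max_embeddings_multiples = 3
max_length = (chunk_length - 2) * max_embeddings_multiples + 2   # 75 * 3 + 2 = 227 tokens
# get_unweighted_text_embeddings splits a 227-token input into 3 chunks of 77 positions
# (the head and tail of each chunk are overwritten with the BOS/EOS ids), encodes each
# chunk separately, and concatenates the embeddings; with `no_boseos_middle=True` the
# duplicated BOS/EOS positions are dropped so the result is again 227 positions long.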
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def get_weighted_text_embeddings(
|
| 266 |
+
pipe,
|
| 267 |
+
prompt: Union[str, List[str]],
|
| 268 |
+
uncond_prompt: Optional[Union[str, List[str]]] = None,
|
| 269 |
+
max_embeddings_multiples: Optional[int] = 4,
|
| 270 |
+
no_boseos_middle: Optional[bool] = False,
|
| 271 |
+
skip_parsing: Optional[bool] = False,
|
| 272 |
+
skip_weighting: Optional[bool] = False,
|
| 273 |
+
**kwargs,
|
| 274 |
+
):
|
| 275 |
+
r"""
|
| 276 |
+
Prompts can be assigned local weights using brackets. For example,
|
| 277 |
+
prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
|
| 278 |
+
and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
|
| 279 |
+
|
| 280 |
+
Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
|
| 281 |
+
|
| 282 |
+
Args:
|
| 283 |
+
pipe (`OnnxStableDiffusionPipeline`):
|
| 284 |
+
Pipe to provide access to the tokenizer and the text encoder.
|
| 285 |
+
prompt (`str` or `List[str]`):
|
| 286 |
+
The prompt or prompts to guide the image generation.
|
| 287 |
+
uncond_prompt (`str` or `List[str]`):
|
| 288 |
+
The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
|
| 289 |
+
is provided, the embeddings of prompt and uncond_prompt are concatenated.
|
| 290 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `4`):
|
| 291 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 292 |
+
no_boseos_middle (`bool`, *optional*, defaults to `False`):
|
| 293 |
+
If the length of the text tokens is a multiple of the text encoder capacity, whether to keep the starting and
|
| 294 |
+
ending tokens in each of the chunks in the middle.
|
| 295 |
+
skip_parsing (`bool`, *optional*, defaults to `False`):
|
| 296 |
+
Skip the parsing of brackets.
|
| 297 |
+
skip_weighting (`bool`, *optional*, defaults to `False`):
|
| 298 |
+
Skip the weighting. When parsing is skipped, this is forced to `True`.
|
| 299 |
+
"""
|
| 300 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 301 |
+
if isinstance(prompt, str):
|
| 302 |
+
prompt = [prompt]
|
| 303 |
+
|
| 304 |
+
if not skip_parsing:
|
| 305 |
+
prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
|
| 306 |
+
if uncond_prompt is not None:
|
| 307 |
+
if isinstance(uncond_prompt, str):
|
| 308 |
+
uncond_prompt = [uncond_prompt]
|
| 309 |
+
uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
|
| 310 |
+
else:
|
| 311 |
+
prompt_tokens = [
|
| 312 |
+
token[1:-1]
|
| 313 |
+
for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids
|
| 314 |
+
]
|
| 315 |
+
prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
|
| 316 |
+
if uncond_prompt is not None:
|
| 317 |
+
if isinstance(uncond_prompt, str):
|
| 318 |
+
uncond_prompt = [uncond_prompt]
|
| 319 |
+
uncond_tokens = [
|
| 320 |
+
token[1:-1]
|
| 321 |
+
for token in pipe.tokenizer(
|
| 322 |
+
uncond_prompt,
|
| 323 |
+
max_length=max_length,
|
| 324 |
+
truncation=True,
|
| 325 |
+
return_tensors="np",
|
| 326 |
+
).input_ids
|
| 327 |
+
]
|
| 328 |
+
uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
|
| 329 |
+
|
| 330 |
+
# round up the longest length of tokens to a multiple of (model_max_length - 2)
|
| 331 |
+
max_length = max([len(token) for token in prompt_tokens])
|
| 332 |
+
if uncond_prompt is not None:
|
| 333 |
+
max_length = max(max_length, max([len(token) for token in uncond_tokens]))
|
| 334 |
+
|
| 335 |
+
max_embeddings_multiples = min(
|
| 336 |
+
max_embeddings_multiples,
|
| 337 |
+
(max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
|
| 338 |
+
)
|
| 339 |
+
max_embeddings_multiples = max(1, max_embeddings_multiples)
|
| 340 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 341 |
+
|
| 342 |
+
# pad the length of tokens and weights
|
| 343 |
+
bos = pipe.tokenizer.bos_token_id
|
| 344 |
+
eos = pipe.tokenizer.eos_token_id
|
| 345 |
+
pad = getattr(pipe.tokenizer, "pad_token_id", eos)
|
| 346 |
+
prompt_tokens, prompt_weights = pad_tokens_and_weights(
|
| 347 |
+
prompt_tokens,
|
| 348 |
+
prompt_weights,
|
| 349 |
+
max_length,
|
| 350 |
+
bos,
|
| 351 |
+
eos,
|
| 352 |
+
pad,
|
| 353 |
+
no_boseos_middle=no_boseos_middle,
|
| 354 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 355 |
+
)
|
| 356 |
+
prompt_tokens = np.array(prompt_tokens, dtype=np.int32)
|
| 357 |
+
if uncond_prompt is not None:
|
| 358 |
+
uncond_tokens, uncond_weights = pad_tokens_and_weights(
|
| 359 |
+
uncond_tokens,
|
| 360 |
+
uncond_weights,
|
| 361 |
+
max_length,
|
| 362 |
+
bos,
|
| 363 |
+
eos,
|
| 364 |
+
pad,
|
| 365 |
+
no_boseos_middle=no_boseos_middle,
|
| 366 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 367 |
+
)
|
| 368 |
+
uncond_tokens = np.array(uncond_tokens, dtype=np.int32)
|
| 369 |
+
|
| 370 |
+
# get the embeddings
|
| 371 |
+
text_embeddings = get_unweighted_text_embeddings(
|
| 372 |
+
pipe,
|
| 373 |
+
prompt_tokens,
|
| 374 |
+
pipe.tokenizer.model_max_length,
|
| 375 |
+
no_boseos_middle=no_boseos_middle,
|
| 376 |
+
)
|
| 377 |
+
prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)
|
| 378 |
+
if uncond_prompt is not None:
|
| 379 |
+
uncond_embeddings = get_unweighted_text_embeddings(
|
| 380 |
+
pipe,
|
| 381 |
+
uncond_tokens,
|
| 382 |
+
pipe.tokenizer.model_max_length,
|
| 383 |
+
no_boseos_middle=no_boseos_middle,
|
| 384 |
+
)
|
| 385 |
+
uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype)
|
| 386 |
+
|
| 387 |
+
# assign weights to the prompts and normalize in the sense of mean
|
| 388 |
+
# TODO: should we normalize by chunk or in a whole (current implementation)?
|
| 389 |
+
if (not skip_parsing) and (not skip_weighting):
|
| 390 |
+
previous_mean = text_embeddings.mean(axis=(-2, -1))
|
| 391 |
+
text_embeddings *= prompt_weights[:, :, None]
|
| 392 |
+
text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]
|
| 393 |
+
if uncond_prompt is not None:
|
| 394 |
+
previous_mean = uncond_embeddings.mean(axis=(-2, -1))
|
| 395 |
+
uncond_embeddings *= uncond_weights[:, :, None]
|
| 396 |
+
uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None]
|
| 397 |
+
|
| 398 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 399 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 400 |
+
# to avoid doing two forward passes
|
| 401 |
+
if uncond_prompt is not None:
|
| 402 |
+
return text_embeddings, uncond_embeddings
|
| 403 |
+
|
| 404 |
+
return text_embeddings
|
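A minimal sketch of calling `get_weighted_text_embeddings` directly, assuming `pipe` is an already-constructed instance of the ONNX pipeline defined below:

text_emb, uncond_emb = get_weighted_text_embeddings(
    pipe=pipe,
    prompt="a (very beautiful:1.2) mountain landscape, [blurry]",
    uncond_prompt="low quality",
    max_embeddings_multiples=3,
)
# Both are numpy arrays of shape (batch, sequence_length, hidden_dim); the per-token
# weights have already been applied and the embedding mean re-normalized.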
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def preprocess_image(image):
|
| 408 |
+
w, h = image.size
|
| 409 |
+
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
|
| 410 |
+
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
| 411 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 412 |
+
image = image[None].transpose(0, 3, 1, 2)
|
| 413 |
+
return 2.0 * image - 1.0
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def preprocess_mask(mask, scale_factor=8):
|
| 417 |
+
mask = mask.convert("L")
|
| 418 |
+
w, h = mask.size
|
| 419 |
+
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
|
| 420 |
+
mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
|
| 421 |
+
mask = np.array(mask).astype(np.float32) / 255.0
|
| 422 |
+
mask = np.tile(mask, (4, 1, 1))
|
| 423 |
+
mask = mask[None].transpose(0, 1, 2, 3)  # add a batch dimension (the transpose itself is a no-op)
|
| 424 |
+
mask = 1 - mask # repaint white, keep black
|
| 425 |
+
return mask
|
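A short shape sketch for the two preprocessing helpers above, assuming 512x512 PIL inputs and the default `scale_factor=8`:

from PIL import Image

init = Image.new("RGB", (512, 512))
msk = Image.new("L", (512, 512))

image = preprocess_image(init)   # float32 array of shape (1, 3, 512, 512), values in [-1, 1]
mask = preprocess_mask(msk, 8)   # float32 array of shape (1, 4, 64, 64); 1.0 keeps the original
                                 # latents, 0.0 marks the repainted (white) region of the mask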
| 426 |
+
|
| 427 |
+
|
| 428 |
+
class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline):
|
| 429 |
+
r"""
|
| 430 |
+
Pipeline for text-to-image generation using Stable Diffusion without token length limit, with support for parsing
|
| 431 |
+
weighting in the prompt.
|
| 432 |
+
|
| 433 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 434 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 435 |
+
"""
|
| 436 |
+
|
| 437 |
+
if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):
|
| 438 |
+
|
| 439 |
+
def __init__(
|
| 440 |
+
self,
|
| 441 |
+
vae_encoder: OnnxRuntimeModel,
|
| 442 |
+
vae_decoder: OnnxRuntimeModel,
|
| 443 |
+
text_encoder: OnnxRuntimeModel,
|
| 444 |
+
tokenizer: CLIPTokenizer,
|
| 445 |
+
unet: OnnxRuntimeModel,
|
| 446 |
+
scheduler: SchedulerMixin,
|
| 447 |
+
safety_checker: OnnxRuntimeModel,
|
| 448 |
+
feature_extractor: CLIPImageProcessor,
|
| 449 |
+
requires_safety_checker: bool = True,
|
| 450 |
+
):
|
| 451 |
+
super().__init__(
|
| 452 |
+
vae_encoder=vae_encoder,
|
| 453 |
+
vae_decoder=vae_decoder,
|
| 454 |
+
text_encoder=text_encoder,
|
| 455 |
+
tokenizer=tokenizer,
|
| 456 |
+
unet=unet,
|
| 457 |
+
scheduler=scheduler,
|
| 458 |
+
safety_checker=safety_checker,
|
| 459 |
+
feature_extractor=feature_extractor,
|
| 460 |
+
requires_safety_checker=requires_safety_checker,
|
| 461 |
+
)
|
| 462 |
+
self.__init__additional__()
|
| 463 |
+
|
| 464 |
+
else:
|
| 465 |
+
|
| 466 |
+
def __init__(
|
| 467 |
+
self,
|
| 468 |
+
vae_encoder: OnnxRuntimeModel,
|
| 469 |
+
vae_decoder: OnnxRuntimeModel,
|
| 470 |
+
text_encoder: OnnxRuntimeModel,
|
| 471 |
+
tokenizer: CLIPTokenizer,
|
| 472 |
+
unet: OnnxRuntimeModel,
|
| 473 |
+
scheduler: SchedulerMixin,
|
| 474 |
+
safety_checker: OnnxRuntimeModel,
|
| 475 |
+
feature_extractor: CLIPImageProcessor,
|
| 476 |
+
):
|
| 477 |
+
super().__init__(
|
| 478 |
+
vae_encoder=vae_encoder,
|
| 479 |
+
vae_decoder=vae_decoder,
|
| 480 |
+
text_encoder=text_encoder,
|
| 481 |
+
tokenizer=tokenizer,
|
| 482 |
+
unet=unet,
|
| 483 |
+
scheduler=scheduler,
|
| 484 |
+
safety_checker=safety_checker,
|
| 485 |
+
feature_extractor=feature_extractor,
|
| 486 |
+
)
|
| 487 |
+
self.__init__additional__()
|
| 488 |
+
|
| 489 |
+
def __init__additional__(self):
|
| 490 |
+
self.unet.config.in_channels = 4
|
| 491 |
+
self.vae_scale_factor = 8
|
| 492 |
+
|
| 493 |
+
def _encode_prompt(
|
| 494 |
+
self,
|
| 495 |
+
prompt,
|
| 496 |
+
num_images_per_prompt,
|
| 497 |
+
do_classifier_free_guidance,
|
| 498 |
+
negative_prompt,
|
| 499 |
+
max_embeddings_multiples,
|
| 500 |
+
):
|
| 501 |
+
r"""
|
| 502 |
+
Encodes the prompt into text encoder hidden states.
|
| 503 |
+
|
| 504 |
+
Args:
|
| 505 |
+
prompt (`str` or `list(int)`):
|
| 506 |
+
prompt to be encoded
|
| 507 |
+
num_images_per_prompt (`int`):
|
| 508 |
+
number of images that should be generated per prompt
|
| 509 |
+
do_classifier_free_guidance (`bool`):
|
| 510 |
+
whether to use classifier free guidance or not
|
| 511 |
+
negative_prompt (`str` or `List[str]`):
|
| 512 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 513 |
+
if `guidance_scale` is less than `1`).
|
| 514 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 515 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 516 |
+
"""
|
| 517 |
+
batch_size = len(prompt) if isinstance(prompt, list) else 1
|
| 518 |
+
|
| 519 |
+
if negative_prompt is None:
|
| 520 |
+
negative_prompt = [""] * batch_size
|
| 521 |
+
elif isinstance(negative_prompt, str):
|
| 522 |
+
negative_prompt = [negative_prompt] * batch_size
|
| 523 |
+
if batch_size != len(negative_prompt):
|
| 524 |
+
raise ValueError(
|
| 525 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 526 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 527 |
+
" the batch size of `prompt`."
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
|
| 531 |
+
pipe=self,
|
| 532 |
+
prompt=prompt,
|
| 533 |
+
uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
|
| 534 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 535 |
+
)
|
| 536 |
+
|
| 537 |
+
text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0)
|
| 538 |
+
if do_classifier_free_guidance:
|
| 539 |
+
uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0)
|
| 540 |
+
text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
|
| 541 |
+
|
| 542 |
+
return text_embeddings
|
| 543 |
+
|
| 544 |
+
def check_inputs(self, prompt, height, width, strength, callback_steps):
|
| 545 |
+
if not isinstance(prompt, str) and not isinstance(prompt, list):
|
| 546 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 547 |
+
|
| 548 |
+
if strength < 0 or strength > 1:
|
| 549 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 550 |
+
|
| 551 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 552 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 553 |
+
|
| 554 |
+
if (callback_steps is None) or (
|
| 555 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 556 |
+
):
|
| 557 |
+
raise ValueError(
|
| 558 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 559 |
+
f" {type(callback_steps)}."
|
| 560 |
+
)
|
| 561 |
+
|
| 562 |
+
def get_timesteps(self, num_inference_steps, strength, is_text2img):
|
| 563 |
+
if is_text2img:
|
| 564 |
+
return self.scheduler.timesteps, num_inference_steps
|
| 565 |
+
else:
|
| 566 |
+
# get the original timestep using init_timestep
|
| 567 |
+
offset = self.scheduler.config.get("steps_offset", 0)
|
| 568 |
+
init_timestep = int(num_inference_steps * strength) + offset
|
| 569 |
+
init_timestep = min(init_timestep, num_inference_steps)
|
| 570 |
+
|
| 571 |
+
t_start = max(num_inference_steps - init_timestep + offset, 0)
|
| 572 |
+
timesteps = self.scheduler.timesteps[t_start:]
|
| 573 |
+
return timesteps, num_inference_steps - t_start
|
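A worked example of the strength-based slicing in `get_timesteps` (the img2img/inpaint branch), assuming `num_inference_steps=50`, `strength=0.6`, and a scheduler with `steps_offset=1`:

# offset        = 1
# init_timestep = int(50 * 0.6) + 1 = 31   (capped at 50)
# t_start       = max(50 - 31 + 1, 0) = 20
# -> the denoising loop runs over scheduler.timesteps[20:], i.e. 30 of the 50 scheduled
#    steps are actually executed, and the function returns (timesteps[20:], 30).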
| 574 |
+
|
| 575 |
+
def run_safety_checker(self, image):
|
| 576 |
+
if self.safety_checker is not None:
|
| 577 |
+
safety_checker_input = self.feature_extractor(
|
| 578 |
+
self.numpy_to_pil(image), return_tensors="np"
|
| 579 |
+
).pixel_values.astype(image.dtype)
|
| 580 |
+
# Calling the safety_checker directly with batch size > 1 raises an error, so run it image by image
|
| 581 |
+
images, has_nsfw_concept = [], []
|
| 582 |
+
for i in range(image.shape[0]):
|
| 583 |
+
image_i, has_nsfw_concept_i = self.safety_checker(
|
| 584 |
+
clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
|
| 585 |
+
)
|
| 586 |
+
images.append(image_i)
|
| 587 |
+
has_nsfw_concept.append(has_nsfw_concept_i[0])
|
| 588 |
+
image = np.concatenate(images)
|
| 589 |
+
else:
|
| 590 |
+
has_nsfw_concept = None
|
| 591 |
+
return image, has_nsfw_concept
|
| 592 |
+
|
| 593 |
+
def decode_latents(self, latents):
|
| 594 |
+
latents = 1 / 0.18215 * latents
|
| 595 |
+
# image = self.vae_decoder(latent_sample=latents)[0]
|
| 596 |
+
# the half-precision VAE decoder can give strange results when batch size > 1, so decode image by image
|
| 597 |
+
image = np.concatenate(
|
| 598 |
+
[self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
|
| 599 |
+
)
|
| 600 |
+
image = np.clip(image / 2 + 0.5, 0, 1)
|
| 601 |
+
image = image.transpose((0, 2, 3, 1))
|
| 602 |
+
return image
|
| 603 |
+
|
| 604 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 605 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 606 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 607 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 608 |
+
# and should be between [0, 1]
|
| 609 |
+
|
| 610 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 611 |
+
extra_step_kwargs = {}
|
| 612 |
+
if accepts_eta:
|
| 613 |
+
extra_step_kwargs["eta"] = eta
|
| 614 |
+
|
| 615 |
+
# check if the scheduler accepts generator
|
| 616 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 617 |
+
if accepts_generator:
|
| 618 |
+
extra_step_kwargs["generator"] = generator
|
| 619 |
+
return extra_step_kwargs
|
| 620 |
+
|
| 621 |
+
def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None):
|
| 622 |
+
if image is None:
|
| 623 |
+
shape = (
|
| 624 |
+
batch_size,
|
| 625 |
+
self.unet.config.in_channels,
|
| 626 |
+
height // self.vae_scale_factor,
|
| 627 |
+
width // self.vae_scale_factor,
|
| 628 |
+
)
|
| 629 |
+
|
| 630 |
+
if latents is None:
|
| 631 |
+
latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
|
| 632 |
+
else:
|
| 633 |
+
if latents.shape != shape:
|
| 634 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
| 635 |
+
|
| 636 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 637 |
+
latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy()
|
| 638 |
+
return latents, None, None
|
| 639 |
+
else:
|
| 640 |
+
init_latents = self.vae_encoder(sample=image)[0]
|
| 641 |
+
init_latents = 0.18215 * init_latents
|
| 642 |
+
init_latents = np.concatenate([init_latents] * batch_size, axis=0)
|
| 643 |
+
init_latents_orig = init_latents
|
| 644 |
+
shape = init_latents.shape
|
| 645 |
+
|
| 646 |
+
# add noise to latents using the timesteps
|
| 647 |
+
noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
|
| 648 |
+
latents = self.scheduler.add_noise(
|
| 649 |
+
torch.from_numpy(init_latents), torch.from_numpy(noise), timestep
|
| 650 |
+
).numpy()
|
| 651 |
+
return latents, init_latents_orig, noise
|
| 652 |
+
|
| 653 |
+
@torch.no_grad()
|
| 654 |
+
def __call__(
|
| 655 |
+
self,
|
| 656 |
+
prompt: Union[str, List[str]],
|
| 657 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 658 |
+
image: Union[np.ndarray, PIL.Image.Image] = None,
|
| 659 |
+
mask_image: Union[np.ndarray, PIL.Image.Image] = None,
|
| 660 |
+
height: int = 512,
|
| 661 |
+
width: int = 512,
|
| 662 |
+
num_inference_steps: int = 50,
|
| 663 |
+
guidance_scale: float = 7.5,
|
| 664 |
+
strength: float = 0.8,
|
| 665 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 666 |
+
eta: float = 0.0,
|
| 667 |
+
generator: Optional[torch.Generator] = None,
|
| 668 |
+
latents: Optional[np.ndarray] = None,
|
| 669 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 670 |
+
output_type: Optional[str] = "pil",
|
| 671 |
+
return_dict: bool = True,
|
| 672 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 673 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 674 |
+
callback_steps: int = 1,
|
| 675 |
+
**kwargs,
|
| 676 |
+
):
|
| 677 |
+
r"""
|
| 678 |
+
Function invoked when calling the pipeline for generation.
|
| 679 |
+
|
| 680 |
+
Args:
|
| 681 |
+
prompt (`str` or `List[str]`):
|
| 682 |
+
The prompt or prompts to guide the image generation.
|
| 683 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 684 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 685 |
+
if `guidance_scale` is less than `1`).
|
| 686 |
+
image (`np.ndarray` or `PIL.Image.Image`):
|
| 687 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 688 |
+
process.
|
| 689 |
+
mask_image (`np.ndarray` or `PIL.Image.Image`):
|
| 690 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 691 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 692 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 693 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 694 |
+
height (`int`, *optional*, defaults to 512):
|
| 695 |
+
The height in pixels of the generated image.
|
| 696 |
+
width (`int`, *optional*, defaults to 512):
|
| 697 |
+
The width in pixels of the generated image.
|
| 698 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 699 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 700 |
+
expense of slower inference.
|
| 701 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 702 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 703 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 704 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 705 |
+
1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
|
| 706 |
+
usually at the expense of lower image quality.
|
| 707 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 708 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 709 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 710 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 711 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 712 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 713 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 714 |
+
The number of images to generate per prompt.
|
| 715 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 716 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 717 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 718 |
+
generator (`torch.Generator`, *optional*):
|
| 719 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 720 |
+
deterministic.
|
| 721 |
+
latents (`np.ndarray`, *optional*):
|
| 722 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 723 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 724 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 725 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 726 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 727 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 728 |
+
The output format of the generated image. Choose between
|
| 729 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 730 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 731 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 732 |
+
plain tuple.
|
| 733 |
+
callback (`Callable`, *optional*):
|
| 734 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 735 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 736 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 737 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 738 |
+
`True`, the inference will be cancelled.
|
| 739 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 740 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 741 |
+
called at every step.
|
| 742 |
+
|
| 743 |
+
Returns:
|
| 744 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 745 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 746 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 747 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 748 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 749 |
+
(nsfw) content, according to the `safety_checker`.
|
| 750 |
+
"""
|
| 751 |
+
# 0. Default height and width to unet
|
| 752 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 753 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 754 |
+
|
| 755 |
+
# 1. Check inputs. Raise error if not correct
|
| 756 |
+
self.check_inputs(prompt, height, width, strength, callback_steps)
|
| 757 |
+
|
| 758 |
+
# 2. Define call parameters
|
| 759 |
+
batch_size = 1 if isinstance(prompt, str) else len(prompt)
|
| 760 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 761 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 762 |
+
# corresponds to doing no classifier free guidance.
|
| 763 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 764 |
+
|
| 765 |
+
# 3. Encode input prompt
|
| 766 |
+
text_embeddings = self._encode_prompt(
|
| 767 |
+
prompt,
|
| 768 |
+
num_images_per_prompt,
|
| 769 |
+
do_classifier_free_guidance,
|
| 770 |
+
negative_prompt,
|
| 771 |
+
max_embeddings_multiples,
|
| 772 |
+
)
|
| 773 |
+
dtype = text_embeddings.dtype
|
| 774 |
+
|
| 775 |
+
# 4. Preprocess image and mask
|
| 776 |
+
if isinstance(image, PIL.Image.Image):
|
| 777 |
+
image = preprocess_image(image)
|
| 778 |
+
if image is not None:
|
| 779 |
+
image = image.astype(dtype)
|
| 780 |
+
if isinstance(mask_image, PIL.Image.Image):
|
| 781 |
+
mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
|
| 782 |
+
if mask_image is not None:
|
| 783 |
+
mask = mask_image.astype(dtype)
|
| 784 |
+
mask = np.concatenate([mask] * batch_size * num_images_per_prompt)
|
| 785 |
+
else:
|
| 786 |
+
mask = None
|
| 787 |
+
|
| 788 |
+
# 5. set timesteps
|
| 789 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 790 |
+
timestep_dtype = next(
|
| 791 |
+
(input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
|
| 792 |
+
)
|
| 793 |
+
timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
|
| 794 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None)
|
| 795 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 796 |
+
|
| 797 |
+
# 6. Prepare latent variables
|
| 798 |
+
latents, init_latents_orig, noise = self.prepare_latents(
|
| 799 |
+
image,
|
| 800 |
+
latent_timestep,
|
| 801 |
+
batch_size * num_images_per_prompt,
|
| 802 |
+
height,
|
| 803 |
+
width,
|
| 804 |
+
dtype,
|
| 805 |
+
generator,
|
| 806 |
+
latents,
|
| 807 |
+
)
|
| 808 |
+
|
| 809 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 810 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 811 |
+
|
| 812 |
+
# 8. Denoising loop
|
| 813 |
+
for i, t in enumerate(self.progress_bar(timesteps)):
|
| 814 |
+
# expand the latents if we are doing classifier free guidance
|
| 815 |
+
latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
|
| 816 |
+
latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
|
| 817 |
+
latent_model_input = latent_model_input.numpy()
|
| 818 |
+
|
| 819 |
+
# predict the noise residual
|
| 820 |
+
noise_pred = self.unet(
|
| 821 |
+
sample=latent_model_input,
|
| 822 |
+
timestep=np.array([t], dtype=timestep_dtype),
|
| 823 |
+
encoder_hidden_states=text_embeddings,
|
| 824 |
+
)
|
| 825 |
+
noise_pred = noise_pred[0]
|
| 826 |
+
|
| 827 |
+
# perform guidance
|
| 828 |
+
if do_classifier_free_guidance:
|
| 829 |
+
noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
|
| 830 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 831 |
+
|
| 832 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 833 |
+
scheduler_output = self.scheduler.step(
|
| 834 |
+
torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
|
| 835 |
+
)
|
| 836 |
+
latents = scheduler_output.prev_sample.numpy()
|
| 837 |
+
|
| 838 |
+
if mask is not None:
|
| 839 |
+
# masking
|
| 840 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 841 |
+
torch.from_numpy(init_latents_orig),
|
| 842 |
+
torch.from_numpy(noise),
|
| 843 |
+
t,
|
| 844 |
+
).numpy()
|
| 845 |
+
latents = (init_latents_proper * mask) + (latents * (1 - mask))
|
| 846 |
+
|
| 847 |
+
# call the callback, if provided
|
| 848 |
+
if i % callback_steps == 0:
|
| 849 |
+
if callback is not None:
|
| 850 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 851 |
+
callback(step_idx, t, latents)
|
| 852 |
+
if is_cancelled_callback is not None and is_cancelled_callback():
|
| 853 |
+
return None
|
| 854 |
+
|
| 855 |
+
# 9. Post-processing
|
| 856 |
+
image = self.decode_latents(latents)
|
| 857 |
+
|
| 858 |
+
# 10. Run safety checker
|
| 859 |
+
image, has_nsfw_concept = self.run_safety_checker(image)
|
| 860 |
+
|
| 861 |
+
# 11. Convert to PIL
|
| 862 |
+
if output_type == "pil":
|
| 863 |
+
image = self.numpy_to_pil(image)
|
| 864 |
+
|
| 865 |
+
if not return_dict:
|
| 866 |
+
return image, has_nsfw_concept
|
| 867 |
+
|
| 868 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
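A minimal end-to-end sketch for this ONNX long-prompt-weighting pipeline, assuming an ONNX export of Stable Diffusion is available; the model id, revision and execution provider below are placeholders:

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion_onnx",
    revision="onnx",
    provider="CPUExecutionProvider",
)

prompt = "a (masterpiece:1.2), best quality, extremely detailed portrait of a red fox"
negative_prompt = "(low quality, worst quality:1.4)"

image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=30).images[0]
image.save("fox.png")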
| 869 |
+
|
| 870 |
+
def text2img(
|
| 871 |
+
self,
|
| 872 |
+
prompt: Union[str, List[str]],
|
| 873 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 874 |
+
height: int = 512,
|
| 875 |
+
width: int = 512,
|
| 876 |
+
num_inference_steps: int = 50,
|
| 877 |
+
guidance_scale: float = 7.5,
|
| 878 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 879 |
+
eta: float = 0.0,
|
| 880 |
+
generator: Optional[torch.Generator] = None,
|
| 881 |
+
latents: Optional[np.ndarray] = None,
|
| 882 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 883 |
+
output_type: Optional[str] = "pil",
|
| 884 |
+
return_dict: bool = True,
|
| 885 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 886 |
+
callback_steps: int = 1,
|
| 887 |
+
**kwargs,
|
| 888 |
+
):
|
| 889 |
+
r"""
|
| 890 |
+
Function for text-to-image generation.
|
| 891 |
+
Args:
|
| 892 |
+
prompt (`str` or `List[str]`):
|
| 893 |
+
The prompt or prompts to guide the image generation.
|
| 894 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 895 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 896 |
+
if `guidance_scale` is less than `1`).
|
| 897 |
+
height (`int`, *optional*, defaults to 512):
|
| 898 |
+
The height in pixels of the generated image.
|
| 899 |
+
width (`int`, *optional*, defaults to 512):
|
| 900 |
+
The width in pixels of the generated image.
|
| 901 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 902 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 903 |
+
expense of slower inference.
|
| 904 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 905 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 906 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 907 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 908 |
+
1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
|
| 909 |
+
usually at the expense of lower image quality.
|
| 910 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 911 |
+
The number of images to generate per prompt.
|
| 912 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 913 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 914 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 915 |
+
generator (`torch.Generator`, *optional*):
|
| 916 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 917 |
+
deterministic.
|
| 918 |
+
latents (`np.ndarray`, *optional*):
|
| 919 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 920 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 921 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 922 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 923 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 924 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 925 |
+
The output format of the generated image. Choose between
|
| 926 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 927 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 928 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 929 |
+
plain tuple.
|
| 930 |
+
callback (`Callable`, *optional*):
|
| 931 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 932 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 933 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 934 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 935 |
+
called at every step.
|
| 936 |
+
Returns:
|
| 937 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 938 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 939 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 940 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 941 |
+
(nsfw) content, according to the `safety_checker`.
|
| 942 |
+
"""
|
| 943 |
+
return self.__call__(
|
| 944 |
+
prompt=prompt,
|
| 945 |
+
negative_prompt=negative_prompt,
|
| 946 |
+
height=height,
|
| 947 |
+
width=width,
|
| 948 |
+
num_inference_steps=num_inference_steps,
|
| 949 |
+
guidance_scale=guidance_scale,
|
| 950 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 951 |
+
eta=eta,
|
| 952 |
+
generator=generator,
|
| 953 |
+
latents=latents,
|
| 954 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 955 |
+
output_type=output_type,
|
| 956 |
+
return_dict=return_dict,
|
| 957 |
+
callback=callback,
|
| 958 |
+
callback_steps=callback_steps,
|
| 959 |
+
**kwargs,
|
| 960 |
+
)
|
| 961 |
+
|
| 962 |
+
def img2img(
|
| 963 |
+
self,
|
| 964 |
+
image: Union[np.ndarray, PIL.Image.Image],
|
| 965 |
+
prompt: Union[str, List[str]],
|
| 966 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 967 |
+
strength: float = 0.8,
|
| 968 |
+
num_inference_steps: Optional[int] = 50,
|
| 969 |
+
guidance_scale: Optional[float] = 7.5,
|
| 970 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 971 |
+
eta: Optional[float] = 0.0,
|
| 972 |
+
generator: Optional[torch.Generator] = None,
|
| 973 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 974 |
+
output_type: Optional[str] = "pil",
|
| 975 |
+
return_dict: bool = True,
|
| 976 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 977 |
+
callback_steps: int = 1,
|
| 978 |
+
**kwargs,
|
| 979 |
+
):
|
| 980 |
+
r"""
|
| 981 |
+
Function for image-to-image generation.
|
| 982 |
+
Args:
|
| 983 |
+
image (`np.ndarray` or `PIL.Image.Image`):
|
| 984 |
+
`Image`, or ndarray representing an image batch, that will be used as the starting point for the
|
| 985 |
+
process.
|
| 986 |
+
prompt (`str` or `List[str]`):
|
| 987 |
+
The prompt or prompts to guide the image generation.
|
| 988 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 989 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 990 |
+
if `guidance_scale` is less than `1`).
|
| 991 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 992 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 993 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 994 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 995 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 996 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 997 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 998 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 999 |
+
expense of slower inference. This parameter will be modulated by `strength`.
|
| 1000 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1001 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 1002 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1003 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 1004 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 1005 |
+
usually at the expense of lower image quality.
|
| 1006 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1007 |
+
The number of images to generate per prompt.
|
| 1008 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1009 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 1010 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1011 |
+
generator (`torch.Generator`, *optional*):
|
| 1012 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 1013 |
+
deterministic.
|
| 1014 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1015 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1016 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1017 |
+
The output format of the generate image. Choose between
|
| 1018 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1019 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1020 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1021 |
+
plain tuple.
|
| 1022 |
+
callback (`Callable`, *optional*):
|
| 1023 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1024 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 1025 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1026 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1027 |
+
called at every step.
|
| 1028 |
+
Returns:
|
| 1029 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1030 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 1031 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1032 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1033 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1034 |
+
"""
|
| 1035 |
+
return self.__call__(
|
| 1036 |
+
prompt=prompt,
|
| 1037 |
+
negative_prompt=negative_prompt,
|
| 1038 |
+
image=image,
|
| 1039 |
+
num_inference_steps=num_inference_steps,
|
| 1040 |
+
guidance_scale=guidance_scale,
|
| 1041 |
+
strength=strength,
|
| 1042 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1043 |
+
eta=eta,
|
| 1044 |
+
generator=generator,
|
| 1045 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1046 |
+
output_type=output_type,
|
| 1047 |
+
return_dict=return_dict,
|
| 1048 |
+
callback=callback,
|
| 1049 |
+
callback_steps=callback_steps,
|
| 1050 |
+
**kwargs,
|
| 1051 |
+
)
|
| 1052 |
+
|
| 1053 |
+
def inpaint(
|
| 1054 |
+
self,
|
| 1055 |
+
image: Union[np.ndarray, PIL.Image.Image],
|
| 1056 |
+
mask_image: Union[np.ndarray, PIL.Image.Image],
|
| 1057 |
+
prompt: Union[str, List[str]],
|
| 1058 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1059 |
+
strength: float = 0.8,
|
| 1060 |
+
num_inference_steps: Optional[int] = 50,
|
| 1061 |
+
guidance_scale: Optional[float] = 7.5,
|
| 1062 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1063 |
+
eta: Optional[float] = 0.0,
|
| 1064 |
+
generator: Optional[torch.Generator] = None,
|
| 1065 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1066 |
+
output_type: Optional[str] = "pil",
|
| 1067 |
+
return_dict: bool = True,
|
| 1068 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 1069 |
+
callback_steps: int = 1,
|
| 1070 |
+
**kwargs,
|
| 1071 |
+
):
|
| 1072 |
+
r"""
|
| 1073 |
+
Function for inpaint.
|
| 1074 |
+
Args:
|
| 1075 |
+
image (`np.ndarray` or `PIL.Image.Image`):
|
| 1076 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 1077 |
+
process. This is the image whose masked region will be inpainted.
|
| 1078 |
+
mask_image (`np.ndarray` or `PIL.Image.Image`):
|
| 1079 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 1080 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 1081 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 1082 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 1083 |
+
prompt (`str` or `List[str]`):
|
| 1084 |
+
The prompt or prompts to guide the image generation.
|
| 1085 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1086 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1087 |
+
if `guidance_scale` is less than `1`).
|
| 1088 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 1089 |
+
Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
|
| 1090 |
+
is 1, the denoising process will be run on the masked area for the full number of iterations specified
|
| 1091 |
+
in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
|
| 1092 |
+
noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
|
| 1093 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1094 |
+
The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
|
| 1095 |
+
the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
|
| 1096 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1097 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 1098 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1099 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 1100 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 1101 |
+
usually at the expense of lower image quality.
|
| 1102 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1103 |
+
The number of images to generate per prompt.
|
| 1104 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1105 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 1106 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1107 |
+
generator (`torch.Generator`, *optional*):
|
| 1108 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 1109 |
+
deterministic.
|
| 1110 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1111 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1112 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1113 |
+
The output format of the generate image. Choose between
|
| 1114 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1115 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1116 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1117 |
+
plain tuple.
|
| 1118 |
+
callback (`Callable`, *optional*):
|
| 1119 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1120 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 1121 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1122 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1123 |
+
called at every step.
|
| 1124 |
+
Returns:
|
| 1125 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1126 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 1127 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1128 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1129 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1130 |
+
"""
|
| 1131 |
+
return self.__call__(
|
| 1132 |
+
prompt=prompt,
|
| 1133 |
+
negative_prompt=negative_prompt,
|
| 1134 |
+
image=image,
|
| 1135 |
+
mask_image=mask_image,
|
| 1136 |
+
num_inference_steps=num_inference_steps,
|
| 1137 |
+
guidance_scale=guidance_scale,
|
| 1138 |
+
strength=strength,
|
| 1139 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1140 |
+
eta=eta,
|
| 1141 |
+
generator=generator,
|
| 1142 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1143 |
+
output_type=output_type,
|
| 1144 |
+
return_dict=return_dict,
|
| 1145 |
+
callback=callback,
|
| 1146 |
+
callback_steps=callback_steps,
|
| 1147 |
+
**kwargs,
|
| 1148 |
+
)
|
v0.27.0/lpw_stable_diffusion_xl.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
v0.27.0/magic_mix.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Union
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from PIL import Image
|
| 5 |
+
from torchvision import transforms as tfms
|
| 6 |
+
from tqdm.auto import tqdm
|
| 7 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 8 |
+
|
| 9 |
+
from diffusers import (
|
| 10 |
+
AutoencoderKL,
|
| 11 |
+
DDIMScheduler,
|
| 12 |
+
DiffusionPipeline,
|
| 13 |
+
LMSDiscreteScheduler,
|
| 14 |
+
PNDMScheduler,
|
| 15 |
+
UNet2DConditionModel,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class MagicMixPipeline(DiffusionPipeline):
|
| 20 |
+
def __init__(
|
| 21 |
+
self,
|
| 22 |
+
vae: AutoencoderKL,
|
| 23 |
+
text_encoder: CLIPTextModel,
|
| 24 |
+
tokenizer: CLIPTokenizer,
|
| 25 |
+
unet: UNet2DConditionModel,
|
| 26 |
+
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
|
| 27 |
+
):
|
| 28 |
+
super().__init__()
|
| 29 |
+
|
| 30 |
+
self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
|
| 31 |
+
|
| 32 |
+
# convert PIL image to latents
|
| 33 |
+
def encode(self, img):
|
| 34 |
+
with torch.no_grad():
|
| 35 |
+
latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
|
| 36 |
+
latent = 0.18215 * latent.latent_dist.sample()
|
| 37 |
+
return latent
|
| 38 |
+
|
| 39 |
+
# convert latents to PIL image
|
| 40 |
+
def decode(self, latent):
|
| 41 |
+
latent = (1 / 0.18215) * latent
|
| 42 |
+
with torch.no_grad():
|
| 43 |
+
img = self.vae.decode(latent).sample
|
| 44 |
+
img = (img / 2 + 0.5).clamp(0, 1)
|
| 45 |
+
img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
|
| 46 |
+
img = (img * 255).round().astype("uint8")
|
| 47 |
+
return Image.fromarray(img[0])
|
| 48 |
+
|
| 49 |
+
# convert prompt into text embeddings, also unconditional embeddings
|
| 50 |
+
def prep_text(self, prompt):
|
| 51 |
+
text_input = self.tokenizer(
|
| 52 |
+
prompt,
|
| 53 |
+
padding="max_length",
|
| 54 |
+
max_length=self.tokenizer.model_max_length,
|
| 55 |
+
truncation=True,
|
| 56 |
+
return_tensors="pt",
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
|
| 60 |
+
|
| 61 |
+
uncond_input = self.tokenizer(
|
| 62 |
+
"",
|
| 63 |
+
padding="max_length",
|
| 64 |
+
max_length=self.tokenizer.model_max_length,
|
| 65 |
+
truncation=True,
|
| 66 |
+
return_tensors="pt",
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 70 |
+
|
| 71 |
+
return torch.cat([uncond_embedding, text_embedding])
|
| 72 |
+
|
| 73 |
+
def __call__(
|
| 74 |
+
self,
|
| 75 |
+
img: Image.Image,
|
| 76 |
+
prompt: str,
|
| 77 |
+
kmin: float = 0.3,
|
| 78 |
+
kmax: float = 0.6,
|
| 79 |
+
mix_factor: float = 0.5,
|
| 80 |
+
seed: int = 42,
|
| 81 |
+
steps: int = 50,
|
| 82 |
+
guidance_scale: float = 7.5,
|
| 83 |
+
) -> Image.Image:
|
| 84 |
+
tmin = steps - int(kmin * steps)
|
| 85 |
+
tmax = steps - int(kmax * steps)
|
| 86 |
+
|
| 87 |
+
text_embeddings = self.prep_text(prompt)
|
| 88 |
+
|
| 89 |
+
self.scheduler.set_timesteps(steps)
|
| 90 |
+
|
| 91 |
+
width, height = img.size
|
| 92 |
+
encoded = self.encode(img)
|
| 93 |
+
|
| 94 |
+
torch.manual_seed(seed)
|
| 95 |
+
noise = torch.randn(
|
| 96 |
+
(1, self.unet.config.in_channels, height // 8, width // 8),
|
| 97 |
+
).to(self.device)
|
| 98 |
+
|
| 99 |
+
latents = self.scheduler.add_noise(
|
| 100 |
+
encoded,
|
| 101 |
+
noise,
|
| 102 |
+
timesteps=self.scheduler.timesteps[tmax],
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
input = torch.cat([latents] * 2)
|
| 106 |
+
|
| 107 |
+
input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
|
| 108 |
+
|
| 109 |
+
with torch.no_grad():
|
| 110 |
+
pred = self.unet(
|
| 111 |
+
input,
|
| 112 |
+
self.scheduler.timesteps[tmax],
|
| 113 |
+
encoder_hidden_states=text_embeddings,
|
| 114 |
+
).sample
|
| 115 |
+
|
| 116 |
+
pred_uncond, pred_text = pred.chunk(2)
|
| 117 |
+
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
|
| 118 |
+
|
| 119 |
+
latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
|
| 120 |
+
|
| 121 |
+
for i, t in enumerate(tqdm(self.scheduler.timesteps)):
|
| 122 |
+
if i > tmax:
|
| 123 |
+
if i < tmin: # layout generation phase
|
| 124 |
+
orig_latents = self.scheduler.add_noise(
|
| 125 |
+
encoded,
|
| 126 |
+
noise,
|
| 127 |
+
timesteps=t,
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
input = (
|
| 131 |
+
(mix_factor * latents) + (1 - mix_factor) * orig_latents
|
| 132 |
+
) # interpolating between layout noise and conditionally generated noise to preserve layout sematics
|
| 133 |
+
input = torch.cat([input] * 2)
|
| 134 |
+
|
| 135 |
+
else: # content generation phase
|
| 136 |
+
input = torch.cat([latents] * 2)
|
| 137 |
+
|
| 138 |
+
input = self.scheduler.scale_model_input(input, t)
|
| 139 |
+
|
| 140 |
+
with torch.no_grad():
|
| 141 |
+
pred = self.unet(
|
| 142 |
+
input,
|
| 143 |
+
t,
|
| 144 |
+
encoder_hidden_states=text_embeddings,
|
| 145 |
+
).sample
|
| 146 |
+
|
| 147 |
+
pred_uncond, pred_text = pred.chunk(2)
|
| 148 |
+
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
|
| 149 |
+
|
| 150 |
+
latents = self.scheduler.step(pred, t, latents).prev_sample
|
| 151 |
+
|
| 152 |
+
return self.decode(latents)
|
v0.27.0/marigold_depth_estimation.py
ADDED
|
@@ -0,0 +1,605 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bingxin Ke, ETH Zurich and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# --------------------------------------------------------------------------
|
| 15 |
+
# If you find this code useful, we kindly ask you to cite our paper in your work.
|
| 16 |
+
# Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
|
| 17 |
+
# More information about the method can be found at https://marigoldmonodepth.github.io
|
| 18 |
+
# --------------------------------------------------------------------------
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
import math
|
| 22 |
+
from typing import Dict, Union
|
| 23 |
+
|
| 24 |
+
import matplotlib
|
| 25 |
+
import numpy as np
|
| 26 |
+
import torch
|
| 27 |
+
from PIL import Image
|
| 28 |
+
from scipy.optimize import minimize
|
| 29 |
+
from torch.utils.data import DataLoader, TensorDataset
|
| 30 |
+
from tqdm.auto import tqdm
|
| 31 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 32 |
+
|
| 33 |
+
from diffusers import (
|
| 34 |
+
AutoencoderKL,
|
| 35 |
+
DDIMScheduler,
|
| 36 |
+
DiffusionPipeline,
|
| 37 |
+
UNet2DConditionModel,
|
| 38 |
+
)
|
| 39 |
+
from diffusers.utils import BaseOutput, check_min_version
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
| 43 |
+
check_min_version("0.27.0")
|
| 44 |
+
|
| 45 |
+
class MarigoldDepthOutput(BaseOutput):
|
| 46 |
+
"""
|
| 47 |
+
Output class for Marigold monocular depth prediction pipeline.
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
depth_np (`np.ndarray`):
|
| 51 |
+
Predicted depth map, with depth values in the range of [0, 1].
|
| 52 |
+
depth_colored (`None` or `PIL.Image.Image`):
|
| 53 |
+
Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
|
| 54 |
+
uncertainty (`None` or `np.ndarray`):
|
| 55 |
+
Uncalibrated uncertainty(MAD, median absolute deviation) coming from ensembling.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
depth_np: np.ndarray
|
| 59 |
+
depth_colored: Union[None, Image.Image]
|
| 60 |
+
uncertainty: Union[None, np.ndarray]
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class MarigoldPipeline(DiffusionPipeline):
|
| 64 |
+
"""
|
| 65 |
+
Pipeline for monocular depth estimation using Marigold: https://marigoldmonodepth.github.io.
|
| 66 |
+
|
| 67 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 68 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
unet (`UNet2DConditionModel`):
|
| 72 |
+
Conditional U-Net to denoise the depth latent, conditioned on image latent.
|
| 73 |
+
vae (`AutoencoderKL`):
|
| 74 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images and depth maps
|
| 75 |
+
to and from latent representations.
|
| 76 |
+
scheduler (`DDIMScheduler`):
|
| 77 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
|
| 78 |
+
text_encoder (`CLIPTextModel`):
|
| 79 |
+
Text-encoder, for empty text embedding.
|
| 80 |
+
tokenizer (`CLIPTokenizer`):
|
| 81 |
+
CLIP tokenizer.
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
rgb_latent_scale_factor = 0.18215
|
| 85 |
+
depth_latent_scale_factor = 0.18215
|
| 86 |
+
|
| 87 |
+
def __init__(
|
| 88 |
+
self,
|
| 89 |
+
unet: UNet2DConditionModel,
|
| 90 |
+
vae: AutoencoderKL,
|
| 91 |
+
scheduler: DDIMScheduler,
|
| 92 |
+
text_encoder: CLIPTextModel,
|
| 93 |
+
tokenizer: CLIPTokenizer,
|
| 94 |
+
):
|
| 95 |
+
super().__init__()
|
| 96 |
+
|
| 97 |
+
self.register_modules(
|
| 98 |
+
unet=unet,
|
| 99 |
+
vae=vae,
|
| 100 |
+
scheduler=scheduler,
|
| 101 |
+
text_encoder=text_encoder,
|
| 102 |
+
tokenizer=tokenizer,
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
self.empty_text_embed = None
|
| 106 |
+
|
| 107 |
+
@torch.no_grad()
|
| 108 |
+
def __call__(
|
| 109 |
+
self,
|
| 110 |
+
input_image: Image,
|
| 111 |
+
denoising_steps: int = 10,
|
| 112 |
+
ensemble_size: int = 10,
|
| 113 |
+
processing_res: int = 768,
|
| 114 |
+
match_input_res: bool = True,
|
| 115 |
+
batch_size: int = 0,
|
| 116 |
+
color_map: str = "Spectral",
|
| 117 |
+
show_progress_bar: bool = True,
|
| 118 |
+
ensemble_kwargs: Dict = None,
|
| 119 |
+
) -> MarigoldDepthOutput:
|
| 120 |
+
"""
|
| 121 |
+
Function invoked when calling the pipeline.
|
| 122 |
+
|
| 123 |
+
Args:
|
| 124 |
+
input_image (`Image`):
|
| 125 |
+
Input RGB (or gray-scale) image.
|
| 126 |
+
processing_res (`int`, *optional*, defaults to `768`):
|
| 127 |
+
Maximum resolution of processing.
|
| 128 |
+
If set to 0: will not resize at all.
|
| 129 |
+
match_input_res (`bool`, *optional*, defaults to `True`):
|
| 130 |
+
Resize depth prediction to match input resolution.
|
| 131 |
+
Only valid if `limit_input_res` is not None.
|
| 132 |
+
denoising_steps (`int`, *optional*, defaults to `10`):
|
| 133 |
+
Number of diffusion denoising steps (DDIM) during inference.
|
| 134 |
+
ensemble_size (`int`, *optional*, defaults to `10`):
|
| 135 |
+
Number of predictions to be ensembled.
|
| 136 |
+
batch_size (`int`, *optional*, defaults to `0`):
|
| 137 |
+
Inference batch size, no bigger than `num_ensemble`.
|
| 138 |
+
If set to 0, the script will automatically decide the proper batch size.
|
| 139 |
+
show_progress_bar (`bool`, *optional*, defaults to `True`):
|
| 140 |
+
Display a progress bar of diffusion denoising.
|
| 141 |
+
color_map (`str`, *optional*, defaults to `"Spectral"`, pass `None` to skip colorized depth map generation):
|
| 142 |
+
Colormap used to colorize the depth map.
|
| 143 |
+
ensemble_kwargs (`dict`, *optional*, defaults to `None`):
|
| 144 |
+
Arguments for detailed ensembling settings.
|
| 145 |
+
Returns:
|
| 146 |
+
`MarigoldDepthOutput`: Output class for Marigold monocular depth prediction pipeline, including:
|
| 147 |
+
- **depth_np** (`np.ndarray`) Predicted depth map, with depth values in the range of [0, 1]
|
| 148 |
+
- **depth_colored** (`None` or `PIL.Image.Image`) Colorized depth map, with the shape of [3, H, W] and
|
| 149 |
+
values in [0, 1]. None if `color_map` is `None`
|
| 150 |
+
- **uncertainty** (`None` or `np.ndarray`) Uncalibrated uncertainty(MAD, median absolute deviation)
|
| 151 |
+
coming from ensembling. None if `ensemble_size = 1`
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
device = self.device
|
| 155 |
+
input_size = input_image.size
|
| 156 |
+
|
| 157 |
+
if not match_input_res:
|
| 158 |
+
assert processing_res is not None, "Value error: `resize_output_back` is only valid with "
|
| 159 |
+
assert processing_res >= 0
|
| 160 |
+
assert denoising_steps >= 1
|
| 161 |
+
assert ensemble_size >= 1
|
| 162 |
+
|
| 163 |
+
# ----------------- Image Preprocess -----------------
|
| 164 |
+
# Resize image
|
| 165 |
+
if processing_res > 0:
|
| 166 |
+
input_image = self.resize_max_res(input_image, max_edge_resolution=processing_res)
|
| 167 |
+
# Convert the image to RGB, to 1.remove the alpha channel 2.convert B&W to 3-channel
|
| 168 |
+
input_image = input_image.convert("RGB")
|
| 169 |
+
image = np.asarray(input_image)
|
| 170 |
+
|
| 171 |
+
# Normalize rgb values
|
| 172 |
+
rgb = np.transpose(image, (2, 0, 1)) # [H, W, rgb] -> [rgb, H, W]
|
| 173 |
+
rgb_norm = rgb / 255.0 * 2.0 - 1.0 # [0, 255] -> [-1, 1]
|
| 174 |
+
rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
|
| 175 |
+
rgb_norm = rgb_norm.to(device)
|
| 176 |
+
assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0
|
| 177 |
+
|
| 178 |
+
# ----------------- Predicting depth -----------------
|
| 179 |
+
# Batch repeated input image
|
| 180 |
+
duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
|
| 181 |
+
single_rgb_dataset = TensorDataset(duplicated_rgb)
|
| 182 |
+
if batch_size > 0:
|
| 183 |
+
_bs = batch_size
|
| 184 |
+
else:
|
| 185 |
+
_bs = self._find_batch_size(
|
| 186 |
+
ensemble_size=ensemble_size,
|
| 187 |
+
input_res=max(rgb_norm.shape[1:]),
|
| 188 |
+
dtype=self.dtype,
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
|
| 192 |
+
|
| 193 |
+
# Predict depth maps (batched)
|
| 194 |
+
depth_pred_ls = []
|
| 195 |
+
if show_progress_bar:
|
| 196 |
+
iterable = tqdm(single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False)
|
| 197 |
+
else:
|
| 198 |
+
iterable = single_rgb_loader
|
| 199 |
+
for batch in iterable:
|
| 200 |
+
(batched_img,) = batch
|
| 201 |
+
depth_pred_raw = self.single_infer(
|
| 202 |
+
rgb_in=batched_img,
|
| 203 |
+
num_inference_steps=denoising_steps,
|
| 204 |
+
show_pbar=show_progress_bar,
|
| 205 |
+
)
|
| 206 |
+
depth_pred_ls.append(depth_pred_raw.detach().clone())
|
| 207 |
+
depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze()
|
| 208 |
+
torch.cuda.empty_cache() # clear vram cache for ensembling
|
| 209 |
+
|
| 210 |
+
# ----------------- Test-time ensembling -----------------
|
| 211 |
+
if ensemble_size > 1:
|
| 212 |
+
depth_pred, pred_uncert = self.ensemble_depths(depth_preds, **(ensemble_kwargs or {}))
|
| 213 |
+
else:
|
| 214 |
+
depth_pred = depth_preds
|
| 215 |
+
pred_uncert = None
|
| 216 |
+
|
| 217 |
+
# ----------------- Post processing -----------------
|
| 218 |
+
# Scale prediction to [0, 1]
|
| 219 |
+
min_d = torch.min(depth_pred)
|
| 220 |
+
max_d = torch.max(depth_pred)
|
| 221 |
+
depth_pred = (depth_pred - min_d) / (max_d - min_d)
|
| 222 |
+
|
| 223 |
+
# Convert to numpy
|
| 224 |
+
depth_pred = depth_pred.cpu().numpy().astype(np.float32)
|
| 225 |
+
|
| 226 |
+
# Resize back to original resolution
|
| 227 |
+
if match_input_res:
|
| 228 |
+
pred_img = Image.fromarray(depth_pred)
|
| 229 |
+
pred_img = pred_img.resize(input_size)
|
| 230 |
+
depth_pred = np.asarray(pred_img)
|
| 231 |
+
|
| 232 |
+
# Clip output range
|
| 233 |
+
depth_pred = depth_pred.clip(0, 1)
|
| 234 |
+
|
| 235 |
+
# Colorize
|
| 236 |
+
if color_map is not None:
|
| 237 |
+
depth_colored = self.colorize_depth_maps(
|
| 238 |
+
depth_pred, 0, 1, cmap=color_map
|
| 239 |
+
).squeeze() # [3, H, W], value in (0, 1)
|
| 240 |
+
depth_colored = (depth_colored * 255).astype(np.uint8)
|
| 241 |
+
depth_colored_hwc = self.chw2hwc(depth_colored)
|
| 242 |
+
depth_colored_img = Image.fromarray(depth_colored_hwc)
|
| 243 |
+
else:
|
| 244 |
+
depth_colored_img = None
|
| 245 |
+
return MarigoldDepthOutput(
|
| 246 |
+
depth_np=depth_pred,
|
| 247 |
+
depth_colored=depth_colored_img,
|
| 248 |
+
uncertainty=pred_uncert,
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
def _encode_empty_text(self):
|
| 252 |
+
"""
|
| 253 |
+
Encode text embedding for empty prompt.
|
| 254 |
+
"""
|
| 255 |
+
prompt = ""
|
| 256 |
+
text_inputs = self.tokenizer(
|
| 257 |
+
prompt,
|
| 258 |
+
padding="do_not_pad",
|
| 259 |
+
max_length=self.tokenizer.model_max_length,
|
| 260 |
+
truncation=True,
|
| 261 |
+
return_tensors="pt",
|
| 262 |
+
)
|
| 263 |
+
text_input_ids = text_inputs.input_ids.to(self.text_encoder.device)
|
| 264 |
+
self.empty_text_embed = self.text_encoder(text_input_ids)[0].to(self.dtype)
|
| 265 |
+
|
| 266 |
+
@torch.no_grad()
|
| 267 |
+
def single_infer(self, rgb_in: torch.Tensor, num_inference_steps: int, show_pbar: bool) -> torch.Tensor:
|
| 268 |
+
"""
|
| 269 |
+
Perform an individual depth prediction without ensembling.
|
| 270 |
+
|
| 271 |
+
Args:
|
| 272 |
+
rgb_in (`torch.Tensor`):
|
| 273 |
+
Input RGB image.
|
| 274 |
+
num_inference_steps (`int`):
|
| 275 |
+
Number of diffusion denoisign steps (DDIM) during inference.
|
| 276 |
+
show_pbar (`bool`):
|
| 277 |
+
Display a progress bar of diffusion denoising.
|
| 278 |
+
Returns:
|
| 279 |
+
`torch.Tensor`: Predicted depth map.
|
| 280 |
+
"""
|
| 281 |
+
device = rgb_in.device
|
| 282 |
+
|
| 283 |
+
# Set timesteps
|
| 284 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 285 |
+
timesteps = self.scheduler.timesteps # [T]
|
| 286 |
+
|
| 287 |
+
# Encode image
|
| 288 |
+
rgb_latent = self._encode_rgb(rgb_in)
|
| 289 |
+
|
| 290 |
+
# Initial depth map (noise)
|
| 291 |
+
depth_latent = torch.randn(rgb_latent.shape, device=device, dtype=self.dtype) # [B, 4, h, w]
|
| 292 |
+
|
| 293 |
+
# Batched empty text embedding
|
| 294 |
+
if self.empty_text_embed is None:
|
| 295 |
+
self._encode_empty_text()
|
| 296 |
+
batch_empty_text_embed = self.empty_text_embed.repeat((rgb_latent.shape[0], 1, 1)) # [B, 2, 1024]
|
| 297 |
+
|
| 298 |
+
# Denoising loop
|
| 299 |
+
if show_pbar:
|
| 300 |
+
iterable = tqdm(
|
| 301 |
+
enumerate(timesteps),
|
| 302 |
+
total=len(timesteps),
|
| 303 |
+
leave=False,
|
| 304 |
+
desc=" " * 4 + "Diffusion denoising",
|
| 305 |
+
)
|
| 306 |
+
else:
|
| 307 |
+
iterable = enumerate(timesteps)
|
| 308 |
+
|
| 309 |
+
for i, t in iterable:
|
| 310 |
+
unet_input = torch.cat([rgb_latent, depth_latent], dim=1) # this order is important
|
| 311 |
+
|
| 312 |
+
# predict the noise residual
|
| 313 |
+
noise_pred = self.unet(unet_input, t, encoder_hidden_states=batch_empty_text_embed).sample # [B, 4, h, w]
|
| 314 |
+
|
| 315 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 316 |
+
depth_latent = self.scheduler.step(noise_pred, t, depth_latent).prev_sample
|
| 317 |
+
torch.cuda.empty_cache()
|
| 318 |
+
depth = self._decode_depth(depth_latent)
|
| 319 |
+
|
| 320 |
+
# clip prediction
|
| 321 |
+
depth = torch.clip(depth, -1.0, 1.0)
|
| 322 |
+
# shift to [0, 1]
|
| 323 |
+
depth = (depth + 1.0) / 2.0
|
| 324 |
+
|
| 325 |
+
return depth
|
| 326 |
+
|
| 327 |
+
def _encode_rgb(self, rgb_in: torch.Tensor) -> torch.Tensor:
|
| 328 |
+
"""
|
| 329 |
+
Encode RGB image into latent.
|
| 330 |
+
|
| 331 |
+
Args:
|
| 332 |
+
rgb_in (`torch.Tensor`):
|
| 333 |
+
Input RGB image to be encoded.
|
| 334 |
+
|
| 335 |
+
Returns:
|
| 336 |
+
`torch.Tensor`: Image latent.
|
| 337 |
+
"""
|
| 338 |
+
# encode
|
| 339 |
+
h = self.vae.encoder(rgb_in)
|
| 340 |
+
moments = self.vae.quant_conv(h)
|
| 341 |
+
mean, logvar = torch.chunk(moments, 2, dim=1)
|
| 342 |
+
# scale latent
|
| 343 |
+
rgb_latent = mean * self.rgb_latent_scale_factor
|
| 344 |
+
return rgb_latent
|
| 345 |
+
|
| 346 |
+
def _decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
|
| 347 |
+
"""
|
| 348 |
+
Decode depth latent into depth map.
|
| 349 |
+
|
| 350 |
+
Args:
|
| 351 |
+
depth_latent (`torch.Tensor`):
|
| 352 |
+
Depth latent to be decoded.
|
| 353 |
+
|
| 354 |
+
Returns:
|
| 355 |
+
`torch.Tensor`: Decoded depth map.
|
| 356 |
+
"""
|
| 357 |
+
# scale latent
|
| 358 |
+
depth_latent = depth_latent / self.depth_latent_scale_factor
|
| 359 |
+
# decode
|
| 360 |
+
z = self.vae.post_quant_conv(depth_latent)
|
| 361 |
+
stacked = self.vae.decoder(z)
|
| 362 |
+
# mean of output channels
|
| 363 |
+
depth_mean = stacked.mean(dim=1, keepdim=True)
|
| 364 |
+
return depth_mean
|
| 365 |
+
|
| 366 |
+
@staticmethod
|
| 367 |
+
def resize_max_res(img: Image.Image, max_edge_resolution: int) -> Image.Image:
|
| 368 |
+
"""
|
| 369 |
+
Resize image to limit maximum edge length while keeping aspect ratio.
|
| 370 |
+
|
| 371 |
+
Args:
|
| 372 |
+
img (`Image.Image`):
|
| 373 |
+
Image to be resized.
|
| 374 |
+
max_edge_resolution (`int`):
|
| 375 |
+
Maximum edge length (pixel).
|
| 376 |
+
|
| 377 |
+
Returns:
|
| 378 |
+
`Image.Image`: Resized image.
|
| 379 |
+
"""
|
| 380 |
+
original_width, original_height = img.size
|
| 381 |
+
downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height)
|
| 382 |
+
|
| 383 |
+
new_width = int(original_width * downscale_factor)
|
| 384 |
+
new_height = int(original_height * downscale_factor)
|
| 385 |
+
|
| 386 |
+
resized_img = img.resize((new_width, new_height))
|
| 387 |
+
return resized_img
|
| 388 |
+
|
| 389 |
+
@staticmethod
|
| 390 |
+
def colorize_depth_maps(depth_map, min_depth, max_depth, cmap="Spectral", valid_mask=None):
|
| 391 |
+
"""
|
| 392 |
+
Colorize depth maps.
|
| 393 |
+
"""
|
| 394 |
+
assert len(depth_map.shape) >= 2, "Invalid dimension"
|
| 395 |
+
|
| 396 |
+
if isinstance(depth_map, torch.Tensor):
|
| 397 |
+
depth = depth_map.detach().clone().squeeze().numpy()
|
| 398 |
+
elif isinstance(depth_map, np.ndarray):
|
| 399 |
+
depth = depth_map.copy().squeeze()
|
| 400 |
+
# reshape to [ (B,) H, W ]
|
| 401 |
+
if depth.ndim < 3:
|
| 402 |
+
depth = depth[np.newaxis, :, :]
|
| 403 |
+
|
| 404 |
+
# colorize
|
| 405 |
+
cm = matplotlib.colormaps[cmap]
|
| 406 |
+
depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
|
| 407 |
+
img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3] # value from 0 to 1
|
| 408 |
+
img_colored_np = np.rollaxis(img_colored_np, 3, 1)
|
| 409 |
+
|
| 410 |
+
if valid_mask is not None:
|
| 411 |
+
if isinstance(depth_map, torch.Tensor):
|
| 412 |
+
valid_mask = valid_mask.detach().numpy()
|
| 413 |
+
valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]
|
| 414 |
+
if valid_mask.ndim < 3:
|
| 415 |
+
valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
|
| 416 |
+
else:
|
| 417 |
+
valid_mask = valid_mask[:, np.newaxis, :, :]
|
| 418 |
+
valid_mask = np.repeat(valid_mask, 3, axis=1)
|
| 419 |
+
img_colored_np[~valid_mask] = 0
|
| 420 |
+
|
| 421 |
+
if isinstance(depth_map, torch.Tensor):
|
| 422 |
+
img_colored = torch.from_numpy(img_colored_np).float()
|
| 423 |
+
elif isinstance(depth_map, np.ndarray):
|
| 424 |
+
img_colored = img_colored_np
|
| 425 |
+
|
| 426 |
+
return img_colored
|
| 427 |
+
|
| 428 |
+
@staticmethod
|
| 429 |
+
def chw2hwc(chw):
|
| 430 |
+
assert 3 == len(chw.shape)
|
| 431 |
+
if isinstance(chw, torch.Tensor):
|
| 432 |
+
hwc = torch.permute(chw, (1, 2, 0))
|
| 433 |
+
elif isinstance(chw, np.ndarray):
|
| 434 |
+
hwc = np.moveaxis(chw, 0, -1)
|
| 435 |
+
return hwc
|
| 436 |
+
|
| 437 |
+
@staticmethod
|
| 438 |
+
def _find_batch_size(ensemble_size: int, input_res: int, dtype: torch.dtype) -> int:
|
| 439 |
+
"""
|
| 440 |
+
Automatically search for suitable operating batch size.
|
| 441 |
+
|
| 442 |
+
Args:
|
| 443 |
+
ensemble_size (`int`):
|
| 444 |
+
Number of predictions to be ensembled.
|
| 445 |
+
input_res (`int`):
|
| 446 |
+
Operating resolution of the input image.
|
| 447 |
+
|
| 448 |
+
Returns:
|
| 449 |
+
`int`: Operating batch size.
|
| 450 |
+
"""
|
| 451 |
+
# Search table for suggested max. inference batch size
|
| 452 |
+
bs_search_table = [
|
| 453 |
+
# tested on A100-PCIE-80GB
|
| 454 |
+
{"res": 768, "total_vram": 79, "bs": 35, "dtype": torch.float32},
|
| 455 |
+
{"res": 1024, "total_vram": 79, "bs": 20, "dtype": torch.float32},
|
| 456 |
+
# tested on A100-PCIE-40GB
|
| 457 |
+
{"res": 768, "total_vram": 39, "bs": 15, "dtype": torch.float32},
|
| 458 |
+
{"res": 1024, "total_vram": 39, "bs": 8, "dtype": torch.float32},
|
| 459 |
+
{"res": 768, "total_vram": 39, "bs": 30, "dtype": torch.float16},
|
| 460 |
+
{"res": 1024, "total_vram": 39, "bs": 15, "dtype": torch.float16},
|
| 461 |
+
# tested on RTX3090, RTX4090
|
| 462 |
+
{"res": 512, "total_vram": 23, "bs": 20, "dtype": torch.float32},
|
| 463 |
+
{"res": 768, "total_vram": 23, "bs": 7, "dtype": torch.float32},
|
| 464 |
+
{"res": 1024, "total_vram": 23, "bs": 3, "dtype": torch.float32},
|
| 465 |
+
{"res": 512, "total_vram": 23, "bs": 40, "dtype": torch.float16},
|
| 466 |
+
{"res": 768, "total_vram": 23, "bs": 18, "dtype": torch.float16},
|
| 467 |
+
{"res": 1024, "total_vram": 23, "bs": 10, "dtype": torch.float16},
|
| 468 |
+
# tested on GTX1080Ti
|
| 469 |
+
{"res": 512, "total_vram": 10, "bs": 5, "dtype": torch.float32},
|
| 470 |
+
{"res": 768, "total_vram": 10, "bs": 2, "dtype": torch.float32},
|
| 471 |
+
{"res": 512, "total_vram": 10, "bs": 10, "dtype": torch.float16},
|
| 472 |
+
{"res": 768, "total_vram": 10, "bs": 5, "dtype": torch.float16},
|
| 473 |
+
{"res": 1024, "total_vram": 10, "bs": 3, "dtype": torch.float16},
|
| 474 |
+
]
|
| 475 |
+
|
| 476 |
+
if not torch.cuda.is_available():
|
| 477 |
+
return 1
|
| 478 |
+
|
| 479 |
+
total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3
|
| 480 |
+
filtered_bs_search_table = [s for s in bs_search_table if s["dtype"] == dtype]
|
| 481 |
+
for settings in sorted(
|
| 482 |
+
filtered_bs_search_table,
|
| 483 |
+
key=lambda k: (k["res"], -k["total_vram"]),
|
| 484 |
+
):
|
| 485 |
+
if input_res <= settings["res"] and total_vram >= settings["total_vram"]:
|
| 486 |
+
bs = settings["bs"]
|
| 487 |
+
if bs > ensemble_size:
|
| 488 |
+
bs = ensemble_size
|
| 489 |
+
elif bs > math.ceil(ensemble_size / 2) and bs < ensemble_size:
|
| 490 |
+
bs = math.ceil(ensemble_size / 2)
|
| 491 |
+
return bs
|
| 492 |
+
|
| 493 |
+
return 1
|
| 494 |
+
|
| 495 |
+
@staticmethod
|
| 496 |
+
def ensemble_depths(
|
| 497 |
+
input_images: torch.Tensor,
|
| 498 |
+
regularizer_strength: float = 0.02,
|
| 499 |
+
max_iter: int = 2,
|
| 500 |
+
tol: float = 1e-3,
|
| 501 |
+
reduction: str = "median",
|
| 502 |
+
max_res: int = None,
|
| 503 |
+
):
|
| 504 |
+
"""
|
| 505 |
+
To ensemble multiple affine-invariant depth images (up to scale and shift),
|
| 506 |
+
by aligning estimating the scale and shift
|
| 507 |
+
"""
|
| 508 |
+
|
| 509 |
+
def inter_distances(tensors: torch.Tensor):
|
| 510 |
+
"""
|
| 511 |
+
To calculate the distance between each two depth maps.
|
| 512 |
+
"""
|
| 513 |
+
distances = []
|
| 514 |
+
for i, j in torch.combinations(torch.arange(tensors.shape[0])):
|
| 515 |
+
arr1 = tensors[i : i + 1]
|
| 516 |
+
arr2 = tensors[j : j + 1]
|
| 517 |
+
distances.append(arr1 - arr2)
|
| 518 |
+
dist = torch.concatenate(distances, dim=0)
|
| 519 |
+
return dist
|
| 520 |
+
|
| 521 |
+
device = input_images.device
|
| 522 |
+
dtype = input_images.dtype
|
| 523 |
+
np_dtype = np.float32
|
| 524 |
+
|
| 525 |
+
original_input = input_images.clone()
|
| 526 |
+
n_img = input_images.shape[0]
|
| 527 |
+
ori_shape = input_images.shape
|
| 528 |
+
|
| 529 |
+
if max_res is not None:
|
| 530 |
+
scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))
|
| 531 |
+
if scale_factor < 1:
|
| 532 |
+
downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")
|
| 533 |
+
input_images = downscaler(torch.from_numpy(input_images)).numpy()
|
| 534 |
+
|
| 535 |
+
# init guess
|
| 536 |
+
_min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
|
| 537 |
+
_max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
|
| 538 |
+
s_init = 1.0 / (_max - _min).reshape((-1, 1, 1))
|
| 539 |
+
t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1))
|
| 540 |
+
x = np.concatenate([s_init, t_init]).reshape(-1).astype(np_dtype)
|
| 541 |
+
|
| 542 |
+
input_images = input_images.to(device)
|
| 543 |
+
|
| 544 |
+
# objective function
|
| 545 |
+
def closure(x):
|
| 546 |
+
l = len(x)
|
| 547 |
+
s = x[: int(l / 2)]
|
| 548 |
+
t = x[int(l / 2) :]
|
| 549 |
+
s = torch.from_numpy(s).to(dtype=dtype).to(device)
|
| 550 |
+
t = torch.from_numpy(t).to(dtype=dtype).to(device)
|
| 551 |
+
|
| 552 |
+
transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))
|
| 553 |
+
dists = inter_distances(transformed_arrays)
|
| 554 |
+
sqrt_dist = torch.sqrt(torch.mean(dists**2))
|
| 555 |
+
|
| 556 |
+
if "mean" == reduction:
|
| 557 |
+
pred = torch.mean(transformed_arrays, dim=0)
|
| 558 |
+
elif "median" == reduction:
|
| 559 |
+
pred = torch.median(transformed_arrays, dim=0).values
|
| 560 |
+
else:
|
| 561 |
+
raise ValueError
|
| 562 |
+
|
| 563 |
+
near_err = torch.sqrt((0 - torch.min(pred)) ** 2)
|
| 564 |
+
far_err = torch.sqrt((1 - torch.max(pred)) ** 2)
|
| 565 |
+
|
| 566 |
+
err = sqrt_dist + (near_err + far_err) * regularizer_strength
|
| 567 |
+
err = err.detach().cpu().numpy().astype(np_dtype)
|
| 568 |
+
return err
|
| 569 |
+
|
| 570 |
+
res = minimize(
|
| 571 |
+
closure,
|
| 572 |
+
x,
|
| 573 |
+
method="BFGS",
|
| 574 |
+
tol=tol,
|
| 575 |
+
options={"maxiter": max_iter, "disp": False},
|
| 576 |
+
)
|
| 577 |
+
x = res.x
|
| 578 |
+
l = len(x)
|
| 579 |
+
s = x[: int(l / 2)]
|
| 580 |
+
t = x[int(l / 2) :]
|
| 581 |
+
|
| 582 |
+
# Prediction
|
| 583 |
+
s = torch.from_numpy(s).to(dtype=dtype).to(device)
|
| 584 |
+
t = torch.from_numpy(t).to(dtype=dtype).to(device)
|
| 585 |
+
transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1)
|
| 586 |
+
if "mean" == reduction:
|
| 587 |
+
aligned_images = torch.mean(transformed_arrays, dim=0)
|
| 588 |
+
std = torch.std(transformed_arrays, dim=0)
|
| 589 |
+
uncertainty = std
|
| 590 |
+
elif "median" == reduction:
|
| 591 |
+
aligned_images = torch.median(transformed_arrays, dim=0).values
|
| 592 |
+
# MAD (median absolute deviation) as uncertainty indicator
|
| 593 |
+
abs_dev = torch.abs(transformed_arrays - aligned_images)
|
| 594 |
+
mad = torch.median(abs_dev, dim=0).values
|
| 595 |
+
uncertainty = mad
|
| 596 |
+
else:
|
| 597 |
+
raise ValueError(f"Unknown reduction method: {reduction}")
|
| 598 |
+
|
| 599 |
+
# Scale and shift to [0, 1]
|
| 600 |
+
_min = torch.min(aligned_images)
|
| 601 |
+
_max = torch.max(aligned_images)
|
| 602 |
+
aligned_images = (aligned_images - _min) / (_max - _min)
|
| 603 |
+
uncertainty /= _max - _min
|
| 604 |
+
|
| 605 |
+
return aligned_images, uncertainty
|
v0.27.0/masked_stable_diffusion_img2img.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import PIL.Image
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from diffusers import StableDiffusionImg2ImgPipeline
|
| 8 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
|
| 12 |
+
debug_save = False
|
| 13 |
+
|
| 14 |
+
@torch.no_grad()
|
| 15 |
+
def __call__(
|
| 16 |
+
self,
|
| 17 |
+
prompt: Union[str, List[str]] = None,
|
| 18 |
+
image: Union[
|
| 19 |
+
torch.FloatTensor,
|
| 20 |
+
PIL.Image.Image,
|
| 21 |
+
np.ndarray,
|
| 22 |
+
List[torch.FloatTensor],
|
| 23 |
+
List[PIL.Image.Image],
|
| 24 |
+
List[np.ndarray],
|
| 25 |
+
] = None,
|
| 26 |
+
strength: float = 0.8,
|
| 27 |
+
num_inference_steps: Optional[int] = 50,
|
| 28 |
+
guidance_scale: Optional[float] = 7.5,
|
| 29 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 30 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 31 |
+
eta: Optional[float] = 0.0,
|
| 32 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 33 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 34 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 35 |
+
output_type: Optional[str] = "pil",
|
| 36 |
+
return_dict: bool = True,
|
| 37 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 38 |
+
callback_steps: int = 1,
|
| 39 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 40 |
+
mask: Union[
|
| 41 |
+
torch.FloatTensor,
|
| 42 |
+
PIL.Image.Image,
|
| 43 |
+
np.ndarray,
|
| 44 |
+
List[torch.FloatTensor],
|
| 45 |
+
List[PIL.Image.Image],
|
            List[np.ndarray],
        ] = None,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
                latents as `image`, but if passing latents directly it is not encoded again.
            strength (`float`, *optional*, defaults to 0.8):
                Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
                essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter is modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that calls every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
                A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """
        # code adapted from parent class StableDiffusionImg2ImgPipeline

        # 0. Check inputs. Raise error if not correct
        self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)

        # 1. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 2. Encode input prompt
        text_encoder_lora_scale = (
            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
        )
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
        )

        # 3. Preprocess image
        image = self.image_processor.preprocess(image)

        # 4. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        # 5. Prepare latent variables
        # it is sampled from the latent distribution of the VAE
        latents = self.prepare_latents(
            image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
        )

        # mean of the latent distribution
        init_latents = [
            self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean
            for i in range(batch_size)
        ]
        init_latents = torch.cat(init_latents, dim=0)

        # 6. create latent mask
        latent_mask = self._make_latent_mask(latents, mask)

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                if latent_mask is not None:
                    latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask)
                    noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if not output_type == "latent":
            scaled = latents / self.vae.config.scaling_factor
            if latent_mask is not None:
                # scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask)
                scaled = torch.lerp(init_latents, scaled, latent_mask)
            image = self.vae.decode(scaled, return_dict=False)[0]
            if self.debug_save:
                image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
                image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True])
                image_gen[0].save("from_latent.png")
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

    def _make_latent_mask(self, latents, mask):
        if mask is not None:
            latent_mask = []
            if not isinstance(mask, list):
                tmp_mask = [mask]
            else:
                tmp_mask = mask
            _, l_channels, l_height, l_width = latents.shape
            for m in tmp_mask:
                if not isinstance(m, PIL.Image.Image):
                    if len(m.shape) == 2:
                        m = m[..., np.newaxis]
                    if m.max() > 1:
                        m = m / 255.0
                    m = self.image_processor.numpy_to_pil(m)[0]
                if m.mode != "L":
                    m = m.convert("L")
                resized = self.image_processor.resize(m, l_height, l_width)
                if self.debug_save:
                    resized.save("latent_mask.png")
                latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0))
            latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents)
            latent_mask = latent_mask / latent_mask.max()
            return latent_mask
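
For context, a minimal usage sketch of the masked img2img pipeline above, loaded as a community pipeline. The checkpoint id, input paths, and mask values here are illustrative assumptions, not part of the uploaded file:

```py
# Sketch: restrict img2img edits to the non-zero area of a user-supplied mask.
import numpy as np
import PIL.Image
import torch

from diffusers import DiffusionPipeline

# Assumed checkpoint id; any Stable Diffusion 1.x checkpoint should work.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="masked_stable_diffusion_img2img",
).to("cuda")

init_image = PIL.Image.open("input.png").convert("RGB").resize((512, 512))

# Non-zero mask values mark the area the pipeline is allowed to change.
mask = np.zeros((512, 512), dtype=np.uint8)
mask[128:384, 128:384] = 255

result = pipe(
    prompt="a red brick wall",
    image=init_image,
    mask=mask,
    strength=0.75,
    num_inference_steps=50,
)
result.images[0].save("masked_img2img.png")
```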
v0.27.0/mixture_canvas.py ADDED
@@ -0,0 +1,501 @@
import re
from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import torch
from numpy import exp, pi, sqrt
from torchvision.transforms.functional import resize
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler


def preprocess_image(image):
    from PIL import Image

    """Preprocess an input image

    Same as
    https://github.com/huggingface/diffusers/blob/1138d63b519e37f0ce04e027b9f4a3261d27c628/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L44
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


@dataclass
class CanvasRegion:
    """Class defining a rectangular region in the canvas"""

    row_init: int  # Region starting row in pixel space (included)
    row_end: int  # Region end row in pixel space (not included)
    col_init: int  # Region starting column in pixel space (included)
    col_end: int  # Region end column in pixel space (not included)
    region_seed: int = None  # Seed for random operations in this region
    noise_eps: float = 0.0  # Deviation of a zero-mean gaussian noise to be applied over the latents in this region. Useful for slightly "rerolling" latents

    def __post_init__(self):
        # Initialize arguments if not specified
        if self.region_seed is None:
            self.region_seed = np.random.randint(9999999999)
        # Check coordinates are non-negative
        for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
            if coord < 0:
                raise ValueError(
                    f"A CanvasRegion must be defined with non-negative indices, found ({self.row_init}, {self.row_end}, {self.col_init}, {self.col_end})"
                )
        # Check coordinates are divisible by 8, else we end up with nasty rounding error when mapping to latent space
        for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
            if coord // 8 != coord / 8:
                raise ValueError(
                    f"A CanvasRegion must be defined with locations divisible by 8, found ({self.row_init}-{self.row_end}, {self.col_init}-{self.col_end})"
                )
        # Check noise eps is non-negative
        if self.noise_eps < 0:
            raise ValueError(f"A CanvasRegion must be defined noises eps non-negative, found {self.noise_eps}")
        # Compute coordinates for this region in latent space
        self.latent_row_init = self.row_init // 8
        self.latent_row_end = self.row_end // 8
        self.latent_col_init = self.col_init // 8
        self.latent_col_end = self.col_end // 8

    @property
    def width(self):
        return self.col_end - self.col_init

    @property
    def height(self):
        return self.row_end - self.row_init

    def get_region_generator(self, device="cpu"):
        """Creates a torch.Generator based on the random seed of this region"""
        # Initialize region generator
        return torch.Generator(device).manual_seed(self.region_seed)

    @property
    def __dict__(self):
        return asdict(self)


class MaskModes(Enum):
    """Modes in which the influence of diffuser is masked"""

    CONSTANT = "constant"
    GAUSSIAN = "gaussian"
    QUARTIC = "quartic"  # See https://en.wikipedia.org/wiki/Kernel_(statistics)


@dataclass
class DiffusionRegion(CanvasRegion):
    """Abstract class defining a region where some class of diffusion process is acting"""

    pass


@dataclass
class Text2ImageRegion(DiffusionRegion):
    """Class defining a region where a text guided diffusion process is acting"""

    prompt: str = ""  # Text prompt guiding the diffuser in this region
    guidance_scale: float = 7.5  # Guidance scale of the diffuser in this region. If None, randomize
    mask_type: MaskModes = MaskModes.GAUSSIAN.value  # Kind of weight mask applied to this region
    mask_weight: float = 1.0  # Global weights multiplier of the mask
    tokenized_prompt = None  # Tokenized prompt
    encoded_prompt = None  # Encoded prompt

    def __post_init__(self):
        super().__post_init__()
        # Mask weight cannot be negative
        if self.mask_weight < 0:
            raise ValueError(
                f"A Text2ImageRegion must be defined with non-negative mask weight, found {self.mask_weight}"
            )
        # Mask type must be an actual known mask
        if self.mask_type not in [e.value for e in MaskModes]:
            raise ValueError(
                f"A Text2ImageRegion was defined with mask {self.mask_type}, which is not an accepted mask ({[e.value for e in MaskModes]})"
            )
        # Randomize arguments if given as None
        if self.guidance_scale is None:
            self.guidance_scale = np.random.randint(5, 30)
        # Clean prompt
        self.prompt = re.sub(" +", " ", self.prompt).replace("\n", " ")

    def tokenize_prompt(self, tokenizer):
        """Tokenizes the prompt for this diffusion region using a given tokenizer"""
        self.tokenized_prompt = tokenizer(
            self.prompt,
            padding="max_length",
            max_length=tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )

    def encode_prompt(self, text_encoder, device):
        """Encodes the previously tokenized prompt for this diffusion region using a given encoder"""
        assert self.tokenized_prompt is not None, ValueError(
            "Prompt in diffusion region must be tokenized before encoding"
        )
        self.encoded_prompt = text_encoder(self.tokenized_prompt.input_ids.to(device))[0]


@dataclass
class Image2ImageRegion(DiffusionRegion):
    """Class defining a region where an image guided diffusion process is acting"""

    reference_image: torch.FloatTensor = None
    strength: float = 0.8  # Strength of the image

    def __post_init__(self):
        super().__post_init__()
        if self.reference_image is None:
            raise ValueError("Must provide a reference image when creating an Image2ImageRegion")
        if self.strength < 0 or self.strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {self.strength}")
        # Rescale image to region shape
        self.reference_image = resize(self.reference_image, size=[self.height, self.width])

    def encode_reference_image(self, encoder, device, generator, cpu_vae=False):
        """Encodes the reference image for this Image2Image region into the latent space"""
        # Place encoder in CPU or not following the parameter cpu_vae
        if cpu_vae:
            # Note here we use mean instead of sample, to avoid moving also generator to CPU, which is troublesome
            self.reference_latents = encoder.cpu().encode(self.reference_image).latent_dist.mean.to(device)
        else:
            self.reference_latents = encoder.encode(self.reference_image.to(device)).latent_dist.sample(
                generator=generator
            )
        self.reference_latents = 0.18215 * self.reference_latents

    @property
    def __dict__(self):
        # This class requires special casting to dict because of the reference_image tensor. Otherwise it cannot be casted to JSON

        # Get all basic fields from parent class
        super_fields = {key: getattr(self, key) for key in DiffusionRegion.__dataclass_fields__.keys()}
        # Pack other fields
        return {**super_fields, "reference_image": self.reference_image.cpu().tolist(), "strength": self.strength}


class RerollModes(Enum):
    """Modes in which the reroll regions operate"""

    RESET = "reset"  # Completely reset the random noise in the region
    EPSILON = "epsilon"  # Alter slightly the latents in the region


@dataclass
class RerollRegion(CanvasRegion):
    """Class defining a rectangular canvas region in which initial latent noise will be rerolled"""

    reroll_mode: RerollModes = RerollModes.RESET.value


@dataclass
class MaskWeightsBuilder:
    """Auxiliary class to compute a tensor of weights for a given diffusion region"""

    latent_space_dim: int  # Size of the U-net latent space
    nbatch: int = 1  # Batch size in the U-net

    def compute_mask_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Computes a tensor of weights for a given diffusion region"""
        MASK_BUILDERS = {
            MaskModes.CONSTANT.value: self._constant_weights,
            MaskModes.GAUSSIAN.value: self._gaussian_weights,
            MaskModes.QUARTIC.value: self._quartic_weights,
        }
        return MASK_BUILDERS[region.mask_type](region)

    def _constant_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Computes a tensor of constant for a given diffusion region"""
        latent_width = region.latent_col_end - region.latent_col_init
        latent_height = region.latent_row_end - region.latent_row_init
        return torch.ones(self.nbatch, self.latent_space_dim, latent_height, latent_width) * region.mask_weight

    def _gaussian_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Generates a gaussian mask of weights for tile contributions"""
        latent_width = region.latent_col_end - region.latent_col_init
        latent_height = region.latent_row_end - region.latent_row_init

        var = 0.01
        midpoint = (latent_width - 1) / 2  # -1 because index goes from 0 to latent_width - 1
        x_probs = [
            exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
            for x in range(latent_width)
        ]
        midpoint = (latent_height - 1) / 2
        y_probs = [
            exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
            for y in range(latent_height)
        ]

        weights = np.outer(y_probs, x_probs) * region.mask_weight
        return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))

    def _quartic_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Generates a quartic mask of weights for tile contributions

        The quartic kernel has bounded support over the diffusion region, and a smooth decay to the region limits.
        """
        quartic_constant = 15.0 / 16.0

        support = (np.array(range(region.latent_col_init, region.latent_col_end)) - region.latent_col_init) / (
            region.latent_col_end - region.latent_col_init - 1
        ) * 1.99 - (1.99 / 2.0)
        x_probs = quartic_constant * np.square(1 - np.square(support))
        support = (np.array(range(region.latent_row_init, region.latent_row_end)) - region.latent_row_init) / (
            region.latent_row_end - region.latent_row_init - 1
        ) * 1.99 - (1.99 / 2.0)
        y_probs = quartic_constant * np.square(1 - np.square(support))

        weights = np.outer(y_probs, x_probs) * region.mask_weight
        return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))


class StableDiffusionCanvasPipeline(DiffusionPipeline, StableDiffusionMixin):
    """Stable Diffusion pipeline that mixes several diffusers in the same canvas"""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def decode_latents(self, latents, cpu_vae=False):
        """Decodes a given array of latents into pixel space"""
        # scale and decode the image latents with vae
        if cpu_vae:
            lat = deepcopy(latents).cpu()
            vae = deepcopy(self.vae).cpu()
        else:
            lat = latents
            vae = self.vae

        lat = 1 / 0.18215 * lat
        image = vae.decode(lat).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        return self.numpy_to_pil(image)

    def get_latest_timestep_img2img(self, num_inference_steps, strength):
        """Finds the latest timesteps where an img2img strength does not impose latents anymore"""
        # get the original timestep using init_timestep
        offset = self.scheduler.config.get("steps_offset", 0)
        init_timestep = int(num_inference_steps * (1 - strength)) + offset
        init_timestep = min(init_timestep, num_inference_steps)

        t_start = min(max(num_inference_steps - init_timestep + offset, 0), num_inference_steps - 1)
        latest_timestep = self.scheduler.timesteps[t_start]

        return latest_timestep

    @torch.no_grad()
    def __call__(
        self,
        canvas_height: int,
        canvas_width: int,
        regions: List[DiffusionRegion],
        num_inference_steps: Optional[int] = 50,
        seed: Optional[int] = 12345,
        reroll_regions: Optional[List[RerollRegion]] = None,
        cpu_vae: Optional[bool] = False,
        decode_steps: Optional[bool] = False,
    ):
        if reroll_regions is None:
            reroll_regions = []
        batch_size = 1

        if decode_steps:
            steps_images = []

        # Prepare scheduler
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        # Split diffusion regions by their kind
        text2image_regions = [region for region in regions if isinstance(region, Text2ImageRegion)]
        image2image_regions = [region for region in regions if isinstance(region, Image2ImageRegion)]

        # Prepare text embeddings
        for region in text2image_regions:
            region.tokenize_prompt(self.tokenizer)
            region.encode_prompt(self.text_encoder, self.device)

        # Create original noisy latents using the timesteps
        latents_shape = (batch_size, self.unet.config.in_channels, canvas_height // 8, canvas_width // 8)
        generator = torch.Generator(self.device).manual_seed(seed)
        init_noise = torch.randn(latents_shape, generator=generator, device=self.device)

        # Reset latents in seed reroll regions, if requested
        for region in reroll_regions:
            if region.reroll_mode == RerollModes.RESET.value:
                region_shape = (
                    latents_shape[0],
                    latents_shape[1],
                    region.latent_row_end - region.latent_row_init,
                    region.latent_col_end - region.latent_col_init,
                )
                init_noise[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] = torch.randn(region_shape, generator=region.get_region_generator(self.device), device=self.device)

        # Apply epsilon noise to regions: first diffusion regions, then reroll regions
        all_eps_rerolls = regions + [r for r in reroll_regions if r.reroll_mode == RerollModes.EPSILON.value]
        for region in all_eps_rerolls:
            if region.noise_eps > 0:
                region_noise = init_noise[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ]
                eps_noise = (
                    torch.randn(
                        region_noise.shape, generator=region.get_region_generator(self.device), device=self.device
                    )
                    * region.noise_eps
                )
                init_noise[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] += eps_noise

        # scale the initial noise by the standard deviation required by the scheduler
        latents = init_noise * self.scheduler.init_noise_sigma

        # Get unconditional embeddings for classifier free guidance in text2image regions
        for region in text2image_regions:
            max_length = region.tokenized_prompt.input_ids.shape[-1]
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            region.encoded_prompt = torch.cat([uncond_embeddings, region.encoded_prompt])

        # Prepare image latents
        for region in image2image_regions:
            region.encode_reference_image(self.vae, device=self.device, generator=generator)

        # Prepare mask of weights for each region
        mask_builder = MaskWeightsBuilder(latent_space_dim=self.unet.config.in_channels, nbatch=batch_size)
        mask_weights = [mask_builder.compute_mask_weights(region).to(self.device) for region in text2image_regions]

        # Diffusion timesteps
        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # Diffuse each region
            noise_preds_regions = []

            # text2image regions
            for region in text2image_regions:
                region_latents = latents[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ]
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([region_latents] * 2)
                # scale model input following scheduler rules
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=region.encoded_prompt)["sample"]
                # perform guidance
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred_region = noise_pred_uncond + region.guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_preds_regions.append(noise_pred_region)

            # Merge noise predictions for all tiles
            noise_pred = torch.zeros(latents.shape, device=self.device)
            contributors = torch.zeros(latents.shape, device=self.device)
            # Add each tile contribution to overall latents
            for region, noise_pred_region, mask_weights_region in zip(
                text2image_regions, noise_preds_regions, mask_weights
            ):
                noise_pred[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] += noise_pred_region * mask_weights_region
                contributors[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] += mask_weights_region
            # Average overlapping areas with more than 1 contributor
            noise_pred /= contributors
            noise_pred = torch.nan_to_num(
                noise_pred
            )  # Replace NaNs by zeros: NaN can appear if a position is not covered by any DiffusionRegion

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

            # Image2Image regions: override latents generated by the scheduler
            for region in image2image_regions:
                influence_step = self.get_latest_timestep_img2img(num_inference_steps, region.strength)
                # Only override in the timesteps before the last influence step of the image (given by its strength)
                if t > influence_step:
                    timestep = t.repeat(batch_size)
                    region_init_noise = init_noise[
                        :,
                        :,
                        region.latent_row_init : region.latent_row_end,
                        region.latent_col_init : region.latent_col_end,
                    ]
                    region_latents = self.scheduler.add_noise(region.reference_latents, region_init_noise, timestep)
                    latents[
                        :,
                        :,
                        region.latent_row_init : region.latent_row_end,
                        region.latent_col_init : region.latent_col_end,
                    ] = region_latents

            if decode_steps:
                steps_images.append(self.decode_latents(latents, cpu_vae))

        # scale and decode the image latents with vae
        image = self.decode_latents(latents, cpu_vae)

        output = {"images": image}
        if decode_steps:
            output = {**output, "steps_images": steps_images}
        return output
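
For context, a minimal usage sketch of the canvas pipeline above with two overlapping text-guided regions. It assumes a local copy of mixture_canvas.py is importable, and the checkpoint id, region coordinates, and prompts are illustrative assumptions:

```py
# Sketch: two Text2ImageRegions share one 512x768 canvas and overlap between
# columns 256-512, where their gaussian masks blend the two prompts.
from mixture_canvas import StableDiffusionCanvasPipeline, Text2ImageRegion

# Assumed checkpoint id; its components match the pipeline's __init__ signature.
pipe = StableDiffusionCanvasPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")

output = pipe(
    canvas_height=512,
    canvas_width=768,
    regions=[
        # Pixel-space coordinates (row_init, row_end, col_init, col_end), multiples of 8.
        Text2ImageRegion(0, 512, 0, 512, guidance_scale=8, prompt="a snowy mountain range, matte painting"),
        Text2ImageRegion(0, 512, 256, 768, guidance_scale=8, prompt="a pine forest at dusk, matte painting"),
    ],
    num_inference_steps=50,
    seed=1234,
)
output["images"][0].save("canvas.png")
```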
v0.27.0/mixture_tiling.py ADDED
@@ -0,0 +1,405 @@
| 1 |
+
import inspect
|
| 2 |
+
from copy import deepcopy
|
| 3 |
+
from enum import Enum
|
| 4 |
+
from typing import List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from tqdm.auto import tqdm
|
| 8 |
+
|
| 9 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 10 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 11 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
| 12 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 13 |
+
from diffusers.utils import logging
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
try:
|
| 17 |
+
from ligo.segments import segment
|
| 18 |
+
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
| 19 |
+
except ImportError:
|
| 20 |
+
raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")
|
| 21 |
+
|
| 22 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 23 |
+
|
| 24 |
+
EXAMPLE_DOC_STRING = """
|
| 25 |
+
Examples:
|
| 26 |
+
```py
|
| 27 |
+
>>> from diffusers import LMSDiscreteScheduler, DiffusionPipeline
|
| 28 |
+
|
| 29 |
+
>>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
| 30 |
+
>>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling")
|
| 31 |
+
>>> pipeline.to("cuda")
|
| 32 |
+
|
| 33 |
+
>>> image = pipeline(
|
| 34 |
+
>>> prompt=[[
|
| 35 |
+
>>> "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
| 36 |
+
>>> "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
| 37 |
+
>>> "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
|
| 38 |
+
>>> ]],
|
| 39 |
+
>>> tile_height=640,
|
| 40 |
+
>>> tile_width=640,
|
| 41 |
+
>>> tile_row_overlap=0,
|
| 42 |
+
>>> tile_col_overlap=256,
|
| 43 |
+
>>> guidance_scale=8,
|
| 44 |
+
>>> seed=7178915308,
|
| 45 |
+
>>> num_inference_steps=50,
|
| 46 |
+
>>> )["images"][0]
|
| 47 |
+
```
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
|
| 52 |
+
"""Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image
|
| 53 |
+
|
| 54 |
+
Returns a tuple with:
|
| 55 |
+
- Starting coordinates of rows in pixel space
|
| 56 |
+
- Ending coordinates of rows in pixel space
|
| 57 |
+
- Starting coordinates of columns in pixel space
|
| 58 |
+
- Ending coordinates of columns in pixel space
|
| 59 |
+
"""
|
| 60 |
+
px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
|
| 61 |
+
px_row_end = px_row_init + tile_height
|
| 62 |
+
px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
|
| 63 |
+
px_col_end = px_col_init + tile_width
|
| 64 |
+
return px_row_init, px_row_end, px_col_init, px_col_end
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
|
| 68 |
+
"""Translates coordinates in pixel space to coordinates in latent space"""
|
| 69 |
+
return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
|
| 73 |
+
"""Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image
|
| 74 |
+
|
| 75 |
+
Returns a tuple with:
|
| 76 |
+
- Starting coordinates of rows in latent space
|
| 77 |
+
- Ending coordinates of rows in latent space
|
| 78 |
+
- Starting coordinates of columns in latent space
|
| 79 |
+
- Ending coordinates of columns in latent space
|
| 80 |
+
"""
|
| 81 |
+
px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
|
| 82 |
+
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
| 83 |
+
)
|
| 84 |
+
return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _tile2latent_exclusive_indices(
|
| 88 |
+
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
|
| 89 |
+
):
|
| 90 |
+
"""Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image
|
| 91 |
+
|
| 92 |
+
Returns a tuple with:
|
| 93 |
+
- Starting coordinates of rows in latent space
|
| 94 |
+
- Ending coordinates of rows in latent space
|
| 95 |
+
- Starting coordinates of columns in latent space
|
| 96 |
+
- Ending coordinates of columns in latent space
|
| 97 |
+
"""
|
| 98 |
+
row_init, row_end, col_init, col_end = _tile2latent_indices(
|
| 99 |
+
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
| 100 |
+
)
|
| 101 |
+
row_segment = segment(row_init, row_end)
|
| 102 |
+
col_segment = segment(col_init, col_end)
|
| 103 |
+
# Iterate over the rest of tiles, clipping the region for the current tile
|
| 104 |
+
for row in range(rows):
|
| 105 |
+
for column in range(columns):
|
| 106 |
+
if row != tile_row and column != tile_col:
|
| 107 |
+
clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
|
| 108 |
+
row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
| 109 |
+
)
|
| 110 |
+
row_segment = row_segment - segment(clip_row_init, clip_row_end)
|
| 111 |
+
col_segment = col_segment - segment(clip_col_init, clip_col_end)
|
| 112 |
+
# return row_init, row_end, col_init, col_end
|
| 113 |
+
return row_segment[0], row_segment[1], col_segment[0], col_segment[1]
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class StableDiffusionExtrasMixin:
|
| 117 |
+
"""Mixin providing additional convenience method to Stable Diffusion pipelines"""
|
| 118 |
+
|
| 119 |
+
def decode_latents(self, latents, cpu_vae=False):
|
| 120 |
+
"""Decodes a given array of latents into pixel space"""
|
| 121 |
+
# scale and decode the image latents with vae
|
| 122 |
+
if cpu_vae:
|
| 123 |
+
lat = deepcopy(latents).cpu()
|
| 124 |
+
vae = deepcopy(self.vae).cpu()
|
| 125 |
+
else:
|
| 126 |
+
lat = latents
|
| 127 |
+
vae = self.vae
|
| 128 |
+
|
| 129 |
+
lat = 1 / 0.18215 * lat
|
| 130 |
+
image = vae.decode(lat).sample
|
| 131 |
+
|
| 132 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 133 |
+
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
| 134 |
+
|
| 135 |
+
return self.numpy_to_pil(image)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
|
| 139 |
+
def __init__(
|
| 140 |
+
self,
|
| 141 |
+
vae: AutoencoderKL,
|
| 142 |
+
text_encoder: CLIPTextModel,
|
| 143 |
+
tokenizer: CLIPTokenizer,
|
| 144 |
+
unet: UNet2DConditionModel,
|
| 145 |
+
scheduler: Union[DDIMScheduler, PNDMScheduler],
|
| 146 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 147 |
+
feature_extractor: CLIPFeatureExtractor,
|
| 148 |
+
):
|
| 149 |
+
super().__init__()
|
| 150 |
+
self.register_modules(
|
| 151 |
+
vae=vae,
|
| 152 |
+
text_encoder=text_encoder,
|
| 153 |
+
tokenizer=tokenizer,
|
| 154 |
+
unet=unet,
|
| 155 |
+
scheduler=scheduler,
|
| 156 |
+
safety_checker=safety_checker,
|
| 157 |
+
feature_extractor=feature_extractor,
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
class SeedTilesMode(Enum):
|
| 161 |
+
"""Modes in which the latents of a particular tile can be re-seeded"""
|
| 162 |
+
|
| 163 |
+
FULL = "full"
|
| 164 |
+
EXCLUSIVE = "exclusive"
|
| 165 |
+
|
| 166 |
+
@torch.no_grad()
|
| 167 |
+
def __call__(
|
| 168 |
+
self,
|
| 169 |
+
prompt: Union[str, List[List[str]]],
|
| 170 |
+
num_inference_steps: Optional[int] = 50,
|
| 171 |
+
guidance_scale: Optional[float] = 7.5,
|
| 172 |
+
eta: Optional[float] = 0.0,
|
| 173 |
+
seed: Optional[int] = None,
|
| 174 |
+
tile_height: Optional[int] = 512,
|
| 175 |
+
tile_width: Optional[int] = 512,
|
| 176 |
+
tile_row_overlap: Optional[int] = 256,
|
| 177 |
+
tile_col_overlap: Optional[int] = 256,
|
| 178 |
+
guidance_scale_tiles: Optional[List[List[float]]] = None,
|
| 179 |
+
seed_tiles: Optional[List[List[int]]] = None,
|
| 180 |
+
seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
|
| 181 |
+
seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
|
| 182 |
+
cpu_vae: Optional[bool] = False,
|
| 183 |
+
):
|
| 184 |
+
r"""
|
| 185 |
+
Function to run the diffusion pipeline with tiling support.
|
| 186 |
+
|
| 187 |
+
Args:
|
| 188 |
+
prompt: either a single string (no tiling) or a list of lists with all the prompts to use (one list for each row of tiles). This will also define the tiling structure.
|
| 189 |
+
num_inference_steps: number of diffusions steps.
|
| 190 |
+
guidance_scale: classifier-free guidance.
|
| 191 |
+
seed: general random seed to initialize latents.
|
| 192 |
+
tile_height: height in pixels of each grid tile.
|
| 193 |
+
tile_width: width in pixels of each grid tile.
|
| 194 |
+
tile_row_overlap: number of overlap pixels between tiles in consecutive rows.
|
| 195 |
+
tile_col_overlap: number of overlap pixels between tiles in consecutive columns.
|
| 196 |
+
guidance_scale_tiles: specific weights for classifier-free guidance in each tile.
|
| 197 |
+
guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
|
| 198 |
+
seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
|
| 199 |
+
seed_tiles_mode: either "full" "exclusive". If "full", all the latents affected by the tile be overriden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overrriden.
|
| 200 |
+
seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overriden using the given seed. Takes priority over seed_tiles.
|
| 201 |
+
cpu_vae: the decoder from latent space to pixel space can require too mucho GPU RAM for large images. If you find out of memory errors at the end of the generation process, try setting this parameter to True to run the decoder in CPU. Slower, but should run without memory issues.
|
| 202 |
+
|
| 203 |
+
Examples:
|
| 204 |
+
|
| 205 |
+
Returns:
|
| 206 |
+
A PIL image with the generated image.
|
| 207 |
+
|
| 208 |
+
"""
|
| 209 |
+
if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
|
| 210 |
+
raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
|
| 211 |
+
grid_rows = len(prompt)
|
| 212 |
+
grid_cols = len(prompt[0])
|
| 213 |
+
if not all(len(row) == grid_cols for row in prompt):
|
| 214 |
+
raise ValueError("All prompt rows must have the same number of prompt columns")
|
| 215 |
+
if not isinstance(seed_tiles_mode, str) and (
|
| 216 |
+
not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
|
| 217 |
+
):
|
| 218 |
+
raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(prompt)}")
|
| 219 |
+
if isinstance(seed_tiles_mode, str):
|
| 220 |
+
seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
|
| 221 |
+
|
| 222 |
+
modes = [mode.value for mode in self.SeedTilesMode]
|
| 223 |
+
if any(mode not in modes for row in seed_tiles_mode for mode in row):
|
| 224 |
+
raise ValueError(f"Seed tiles mode must be one of {modes}")
|
| 225 |
+
if seed_reroll_regions is None:
|
| 226 |
+
seed_reroll_regions = []
|
| 227 |
+
batch_size = 1
|
| 228 |
+
|
| 229 |
+
# create original noisy latents using the timesteps
|
| 230 |
+
height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
|
| 231 |
+
width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
|
| 232 |
+
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
|
| 233 |
+
generator = torch.Generator("cuda").manual_seed(seed)
|
| 234 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device)
|
| 235 |
+
|
| 236 |
+
# overwrite latents for specific tiles if provided
|
| 237 |
+
if seed_tiles is not None:
|
| 238 |
+
for row in range(grid_rows):
|
| 239 |
+
for col in range(grid_cols):
|
| 240 |
+
if (seed_tile := seed_tiles[row][col]) is not None:
|
| 241 |
+
mode = seed_tiles_mode[row][col]
|
| 242 |
+
if mode == self.SeedTilesMode.FULL.value:
|
| 243 |
+
row_init, row_end, col_init, col_end = _tile2latent_indices(
|
| 244 |
+
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
| 245 |
+
)
|
| 246 |
+
else:
|
| 247 |
+
row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
|
| 248 |
+
row,
|
| 249 |
+
col,
|
| 250 |
+
tile_width,
|
| 251 |
+
tile_height,
|
| 252 |
+
tile_row_overlap,
|
| 253 |
+
tile_col_overlap,
|
| 254 |
+
grid_rows,
|
| 255 |
+
grid_cols,
|
| 256 |
+
)
|
| 257 |
+
tile_generator = torch.Generator("cuda").manual_seed(seed_tile)
|
| 258 |
+
tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
| 259 |
+
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
| 260 |
+
tile_shape, generator=tile_generator, device=self.device
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
# overwrite again for seed reroll regions
|
| 264 |
+
for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
|
| 265 |
+
row_init, row_end, col_init, col_end = _pixel2latent_indices(
|
| 266 |
+
row_init, row_end, col_init, col_end
|
| 267 |
+
) # to latent space coordinates
|
| 268 |
+
reroll_generator = torch.Generator("cuda").manual_seed(seed_reroll)
|
| 269 |
+
region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
| 270 |
+
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
| 271 |
+
region_shape, generator=reroll_generator, device=self.device
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
# Prepare scheduler
|
| 275 |
+
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
| 276 |
+
extra_set_kwargs = {}
|
| 277 |
+
if accepts_offset:
|
| 278 |
+
extra_set_kwargs["offset"] = 1
|
| 279 |
+
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
|
| 280 |
+
        # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents * self.scheduler.sigmas[0]

        # get prompts text embeddings
        text_input = [
            [
                self.tokenizer(
                    col,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                for col in row
            ]
            for row in prompt
        ]
        text_embeddings = [[self.text_encoder(col.input_ids.to(self.device))[0] for col in row] for row in text_input]

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0  # TODO: also active if any tile has guidance scale
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            for i in range(grid_rows):
                for j in range(grid_cols):
                    max_length = text_input[i][j].input_ids.shape[-1]
                    uncond_input = self.tokenizer(
                        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
                    )
                    uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

                    # For classifier free guidance, we need to do two forward passes.
                    # Here we concatenate the unconditional and text embeddings into a single batch
                    # to avoid doing two forward passes
                    text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # Mask for tile weights strength
        tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)

        # Diffusion timesteps
        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # Diffuse each tile
            noise_preds = []
            for row in range(grid_rows):
                noise_preds_row = []
                for col in range(grid_cols):
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                        row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                    )
                    tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                    # predict the noise residual
                    noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])[
                        "sample"
                    ]
                    # perform guidance
                    if do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        guidance = (
                            guidance_scale
                            if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
                            else guidance_scale_tiles[row][col]
                        )
                        noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
                        noise_preds_row.append(noise_pred_tile)
                noise_preds.append(noise_preds_row)
            # Stitch noise predictions for all tiles
            noise_pred = torch.zeros(latents.shape, device=self.device)
            contributors = torch.zeros(latents.shape, device=self.device)
            # Add each tile contribution to overall latents
            for row in range(grid_rows):
                for col in range(grid_cols):
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                        row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                    )
                    noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
                        noise_preds[row][col] * tile_weights
                    )
                    contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
            # Average overlapping areas with more than 1 contributor
            noise_pred /= contributors

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        # scale and decode the image latents with vae
        image = self.decode_latents(latents, cpu_vae)

        return {"images": image}

    def _gaussian_weights(self, tile_width, tile_height, nbatches):
        """Generates a gaussian mask of weights for tile contributions"""
        import numpy as np
        from numpy import exp, pi, sqrt

        latent_width = tile_width // 8
        latent_height = tile_height // 8

        var = 0.01
        midpoint = (latent_width - 1) / 2  # -1 because index goes from 0 to latent_width - 1
        x_probs = [
            exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
            for x in range(latent_width)
        ]
        midpoint = latent_height / 2
        y_probs = [
            exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
            for y in range(latent_height)
        ]

        weights = np.outer(y_probs, x_probs)
        return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
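The `_gaussian_weights` mask above is what hides tile seams: each tile's noise prediction is weighted toward its own center, and overlapping latent regions are renormalized by the accumulated weights, as in the stitching loop. Below is a minimal, self-contained sketch of that blending step; all sizes, tensors and helper names here are illustrative and not taken from the pipeline.

```py
# Minimal sketch of weighted tile stitching (illustrative shapes and values only).
import torch


def gaussian_weights(h, w, var=0.01):
    # 2D Gaussian bump peaking at the tile center, shape (h, w)
    ys = torch.arange(h, dtype=torch.float32)
    xs = torch.arange(w, dtype=torch.float32)
    y = torch.exp(-((ys - (h - 1) / 2) ** 2) / (h * h) / (2 * var))
    x = torch.exp(-((xs - (w - 1) / 2) ** 2) / (w * w) / (2 * var))
    return torch.outer(y, x)


# two 4x6 "tile predictions" overlapping by 2 columns on a 4x10 canvas
tile_a = torch.zeros(4, 10)
tile_b = torch.ones(4, 10)
w = gaussian_weights(4, 6)

blended = torch.zeros(4, 10)
contributors = torch.zeros(4, 10)
for pred, (c0, c1) in [(tile_a, (0, 6)), (tile_b, (4, 10))]:
    blended[:, c0:c1] += pred[:, c0:c1] * w   # weight each tile toward its center
    contributors[:, c0:c1] += w               # track accumulated weight per pixel
blended /= contributors  # overlap columns become a smooth mix of both tiles
print(blended[0])
```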
v0.27.0/multilingual_stable_diffusion.py ADDED
@@ -0,0 +1,410 @@
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    MBart50TokenizerFast,
    MBartForConditionalGeneration,
    pipeline,
)

from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def detect_language(pipe, prompt, batch_size):
    """helper function to detect language(s) of prompt"""

    if batch_size == 1:
        preds = pipe(prompt, top_k=1, truncation=True, max_length=128)
        return preds[0]["label"]
    else:
        detected_languages = []
        for p in prompt:
            preds = pipe(p, top_k=1, truncation=True, max_length=128)
            detected_languages.append(preds[0]["label"])

        return detected_languages


def translate_prompt(prompt, translation_tokenizer, translation_model, device):
    """helper function to translate prompt to English"""

    encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device)
    generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000)
    en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)

    return en_trans[0]


class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion in different languages.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        detection_pipeline ([`pipeline`]):
            Transformers pipeline to detect prompt's language.
        translation_model ([`MBartForConditionalGeneration`]):
            Model to translate prompt to English, if necessary. Please refer to the
            [model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details.
        translation_tokenizer ([`MBart50TokenizerFast`]):
            Tokenizer of the translation model.
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def __init__(
        self,
        detection_pipeline: pipeline,
        translation_model: MBartForConditionalGeneration,
        translation_tokenizer: MBart50TokenizerFast,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            detection_pipeline=detection_pipeline,
            translation_model=translation_model,
            translation_tokenizer=translation_tokenizer,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation. Can be in different languages.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # detect language and translate if necessary
        prompt_language = detect_language(self.detection_pipeline, prompt, batch_size)
        if batch_size == 1 and prompt_language != "en":
            prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device)

        if isinstance(prompt, list):
            for index in range(batch_size):
                if prompt_language[index] != "en":
                    p = translate_prompt(
                        prompt[index], self.translation_tokenizer, self.translation_model, self.device
                    )
                    prompt[index] = p

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                # detect language and translate it if necessary
                negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size)
                if negative_prompt_language != "en":
                    negative_prompt = translate_prompt(
                        negative_prompt, self.translation_tokenizer, self.translation_model, self.device
                    )
                if isinstance(negative_prompt, str):
                    uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                # detect language and translate it if necessary
                if isinstance(negative_prompt, list):
                    negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size)
                    for index in range(batch_size):
                        if negative_prompt_languages[index] != "en":
                            p = translate_prompt(
                                negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device
                            )
                            negative_prompt[index] = p
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                step_idx = i // getattr(self.scheduler, "order", 1)
                callback(step_idx, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
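The file above only defines the pipeline class; for orientation, here is a hedged sketch of how such a community pipeline is typically instantiated. The language-detection and translation checkpoints and the base Stable Diffusion model named below are assumptions, not requirements of this file.

```py
# Hedged usage sketch (checkpoint names are assumptions; any language-detection
# pipeline and any to-English translation model with the same interfaces work).
import torch
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline

from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

detector = pipeline(
    "text-classification",
    model="papluca/xlm-roberta-base-language-detection",  # assumed checkpoint
    device=0 if device == "cuda" else -1,
)
trans_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
trans_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",  # assumed base model
    custom_pipeline="multilingual_stable_diffusion",
    detection_pipeline=detector,
    translation_model=trans_model,
    translation_tokenizer=trans_tokenizer,
).to(device)

# A non-English prompt is detected and translated to English before encoding.
image = pipe("Una casa en la playa", num_inference_steps=30).images[0]
image.save("beach_house.png")
```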
v0.27.0/one_step_unet.py ADDED
@@ -0,0 +1,24 @@
#!/usr/bin/env python3
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
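This toy pipeline runs a single UNet forward pass and one scheduler step, then returns a tensor of ones, so it mostly serves as a smoke test for custom-pipeline loading. A hedged usage sketch follows; the checkpoint name is an assumption, and any repo exposing a `unet` and a `scheduler` should behave the same way.

```py
# Hedged usage sketch for the one-step pipeline above.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
out = pipe()          # one UNet forward + one scheduler step, then all-ones tensor
print(out.shape)      # e.g. torch.Size([1, 3, 32, 32]) for this checkpoint
```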
v0.27.0/pipeline_animatediff_controlnet.py ADDED
@@ -0,0 +1,1114 @@
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import torch
|
| 20 |
+
import torch.nn.functional as F
|
| 21 |
+
from PIL import Image
|
| 22 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 23 |
+
|
| 24 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 25 |
+
from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 26 |
+
from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel
|
| 27 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 28 |
+
from diffusers.models.unets.unet_motion_model import MotionAdapter
|
| 29 |
+
from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
|
| 30 |
+
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
| 31 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 32 |
+
from diffusers.schedulers import (
|
| 33 |
+
DDIMScheduler,
|
| 34 |
+
DPMSolverMultistepScheduler,
|
| 35 |
+
EulerAncestralDiscreteScheduler,
|
| 36 |
+
EulerDiscreteScheduler,
|
| 37 |
+
LMSDiscreteScheduler,
|
| 38 |
+
PNDMScheduler,
|
| 39 |
+
)
|
| 40 |
+
from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
|
| 41 |
+
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 45 |
+
|
| 46 |
+
EXAMPLE_DOC_STRING = """
|
| 47 |
+
Examples:
|
| 48 |
+
```py
|
| 49 |
+
>>> import torch
|
| 50 |
+
>>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter
|
| 51 |
+
>>> from diffusers.pipelines import DiffusionPipeline
|
| 52 |
+
>>> from diffusers.schedulers import DPMSolverMultistepScheduler
|
| 53 |
+
>>> from PIL import Image
|
| 54 |
+
|
| 55 |
+
>>> motion_id = "guoyww/animatediff-motion-adapter-v1-5-2"
|
| 56 |
+
>>> adapter = MotionAdapter.from_pretrained(motion_id)
|
| 57 |
+
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
|
| 58 |
+
>>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
|
| 59 |
+
|
| 60 |
+
>>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
|
| 61 |
+
>>> pipe = DiffusionPipeline.from_pretrained(
|
| 62 |
+
... model_id,
|
| 63 |
+
... motion_adapter=adapter,
|
| 64 |
+
... controlnet=controlnet,
|
| 65 |
+
... vae=vae,
|
| 66 |
+
... custom_pipeline="pipeline_animatediff_controlnet",
|
| 67 |
+
... ).to(device="cuda", dtype=torch.float16)
|
| 68 |
+
>>> pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained(
|
| 69 |
+
... model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1, beta_schedule="linear",
|
| 70 |
+
... )
|
| 71 |
+
>>> pipe.enable_vae_slicing()
|
| 72 |
+
|
| 73 |
+
>>> conditioning_frames = []
|
| 74 |
+
>>> for i in range(1, 16 + 1):
|
| 75 |
+
... conditioning_frames.append(Image.open(f"frame_{i}.png"))
|
| 76 |
+
|
| 77 |
+
>>> prompt = "astronaut in space, dancing"
|
| 78 |
+
>>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
|
| 79 |
+
>>> result = pipe(
|
| 80 |
+
... prompt=prompt,
|
| 81 |
+
... negative_prompt=negative_prompt,
|
| 82 |
+
... width=512,
|
| 83 |
+
... height=768,
|
| 84 |
+
... conditioning_frames=conditioning_frames,
|
| 85 |
+
... num_inference_steps=12,
|
| 86 |
+
... )
|
| 87 |
+
|
| 88 |
+
>>> from diffusers.utils import export_to_gif
|
| 89 |
+
>>> export_to_gif(result.frames[0], "result.gif")
|
| 90 |
+
```
|
| 91 |
+
"""
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
|
| 95 |
+
def tensor2vid(video: torch.Tensor, processor, output_type="np"):
|
| 96 |
+
batch_size, channels, num_frames, height, width = video.shape
|
| 97 |
+
outputs = []
|
| 98 |
+
for batch_idx in range(batch_size):
|
| 99 |
+
batch_vid = video[batch_idx].permute(1, 0, 2, 3)
|
| 100 |
+
batch_output = processor.postprocess(batch_vid, output_type)
|
| 101 |
+
|
| 102 |
+
outputs.append(batch_output)
|
| 103 |
+
|
| 104 |
+
if output_type == "np":
|
| 105 |
+
outputs = np.stack(outputs)
|
| 106 |
+
|
| 107 |
+
elif output_type == "pt":
|
| 108 |
+
outputs = torch.stack(outputs)
|
| 109 |
+
|
| 110 |
+
elif not output_type == "pil":
|
| 111 |
+
raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
|
| 112 |
+
|
| 113 |
+
return outputs
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class AnimateDiffControlNetPipeline(
|
| 117 |
+
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin
|
| 118 |
+
):
|
| 119 |
+
r"""
|
| 120 |
+
Pipeline for text-to-video generation.
|
| 121 |
+
|
| 122 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 123 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 124 |
+
|
| 125 |
+
The pipeline also inherits the following loading methods:
|
| 126 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 127 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 128 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 129 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 130 |
+
|
| 131 |
+
Args:
|
| 132 |
+
vae ([`AutoencoderKL`]):
|
| 133 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 134 |
+
text_encoder ([`CLIPTextModel`]):
|
| 135 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 136 |
+
tokenizer (`CLIPTokenizer`):
|
| 137 |
+
A [`~transformers.CLIPTokenizer`] to tokenize text.
|
| 138 |
+
unet ([`UNet2DConditionModel`]):
|
| 139 |
+
A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
|
| 140 |
+
motion_adapter ([`MotionAdapter`]):
|
| 141 |
+
A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
|
| 142 |
+
scheduler ([`SchedulerMixin`]):
|
| 143 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 144 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 145 |
+
"""
|
| 146 |
+
|
| 147 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 148 |
+
_optional_components = ["feature_extractor", "image_encoder"]
|
| 149 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 150 |
+
|
| 151 |
+
def __init__(
|
| 152 |
+
self,
|
| 153 |
+
vae: AutoencoderKL,
|
| 154 |
+
text_encoder: CLIPTextModel,
|
| 155 |
+
tokenizer: CLIPTokenizer,
|
| 156 |
+
unet: UNet2DConditionModel,
|
| 157 |
+
motion_adapter: MotionAdapter,
|
| 158 |
+
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
|
| 159 |
+
scheduler: Union[
|
| 160 |
+
DDIMScheduler,
|
| 161 |
+
PNDMScheduler,
|
| 162 |
+
LMSDiscreteScheduler,
|
| 163 |
+
EulerDiscreteScheduler,
|
| 164 |
+
EulerAncestralDiscreteScheduler,
|
| 165 |
+
DPMSolverMultistepScheduler,
|
| 166 |
+
],
|
| 167 |
+
feature_extractor: Optional[CLIPImageProcessor] = None,
|
| 168 |
+
image_encoder: Optional[CLIPVisionModelWithProjection] = None,
|
| 169 |
+
):
|
| 170 |
+
super().__init__()
|
| 171 |
+
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
| 172 |
+
|
| 173 |
+
if isinstance(controlnet, (list, tuple)):
|
| 174 |
+
controlnet = MultiControlNetModel(controlnet)
|
| 175 |
+
|
| 176 |
+
self.register_modules(
|
| 177 |
+
vae=vae,
|
| 178 |
+
text_encoder=text_encoder,
|
| 179 |
+
tokenizer=tokenizer,
|
| 180 |
+
unet=unet,
|
| 181 |
+
motion_adapter=motion_adapter,
|
| 182 |
+
controlnet=controlnet,
|
| 183 |
+
scheduler=scheduler,
|
| 184 |
+
feature_extractor=feature_extractor,
|
| 185 |
+
image_encoder=image_encoder,
|
| 186 |
+
)
|
| 187 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 188 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 189 |
+
self.control_image_processor = VaeImageProcessor(
|
| 190 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
| 194 |
+
def encode_prompt(
|
| 195 |
+
self,
|
| 196 |
+
prompt,
|
| 197 |
+
device,
|
| 198 |
+
num_images_per_prompt,
|
| 199 |
+
do_classifier_free_guidance,
|
| 200 |
+
negative_prompt=None,
|
| 201 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 202 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 203 |
+
lora_scale: Optional[float] = None,
|
| 204 |
+
clip_skip: Optional[int] = None,
|
| 205 |
+
):
|
| 206 |
+
r"""
|
| 207 |
+
Encodes the prompt into text encoder hidden states.
|
| 208 |
+
|
| 209 |
+
Args:
|
| 210 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 211 |
+
prompt to be encoded
|
| 212 |
+
device: (`torch.device`):
|
| 213 |
+
torch device
|
| 214 |
+
num_images_per_prompt (`int`):
|
| 215 |
+
number of images that should be generated per prompt
|
| 216 |
+
do_classifier_free_guidance (`bool`):
|
| 217 |
+
whether to use classifier free guidance or not
|
| 218 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 219 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 220 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 221 |
+
less than `1`).
|
| 222 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 223 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 224 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 225 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 226 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 227 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 228 |
+
argument.
|
| 229 |
+
lora_scale (`float`, *optional*):
|
| 230 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 231 |
+
clip_skip (`int`, *optional*):
|
| 232 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 233 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 234 |
+
"""
|
| 235 |
+
# set lora scale so that monkey patched LoRA
|
| 236 |
+
# function of text encoder can correctly access it
|
| 237 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 238 |
+
self._lora_scale = lora_scale
|
| 239 |
+
|
| 240 |
+
# dynamically adjust the LoRA scale
|
| 241 |
+
if not USE_PEFT_BACKEND:
|
| 242 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 243 |
+
else:
|
| 244 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 245 |
+
|
| 246 |
+
if prompt is not None and isinstance(prompt, str):
|
| 247 |
+
batch_size = 1
|
| 248 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 249 |
+
batch_size = len(prompt)
|
| 250 |
+
else:
|
| 251 |
+
batch_size = prompt_embeds.shape[0]
|
| 252 |
+
|
| 253 |
+
if prompt_embeds is None:
|
| 254 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 255 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 256 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 257 |
+
|
| 258 |
+
text_inputs = self.tokenizer(
|
| 259 |
+
prompt,
|
| 260 |
+
padding="max_length",
|
| 261 |
+
max_length=self.tokenizer.model_max_length,
|
| 262 |
+
truncation=True,
|
| 263 |
+
return_tensors="pt",
|
| 264 |
+
)
|
| 265 |
+
text_input_ids = text_inputs.input_ids
|
| 266 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 267 |
+
|
| 268 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 269 |
+
text_input_ids, untruncated_ids
|
| 270 |
+
):
|
| 271 |
+
removed_text = self.tokenizer.batch_decode(
|
| 272 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 273 |
+
)
|
| 274 |
+
logger.warning(
|
| 275 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 276 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 280 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 281 |
+
else:
|
| 282 |
+
attention_mask = None
|
| 283 |
+
|
| 284 |
+
if clip_skip is None:
|
| 285 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 286 |
+
prompt_embeds = prompt_embeds[0]
|
| 287 |
+
else:
|
| 288 |
+
prompt_embeds = self.text_encoder(
|
| 289 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 290 |
+
)
|
| 291 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 292 |
+
# all the hidden states from the encoder layers. Then index into
|
| 293 |
+
# the tuple to access the hidden states from the desired layer.
|
| 294 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 295 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 296 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 297 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 298 |
+
# layer.
|
| 299 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 300 |
+
|
| 301 |
+
if self.text_encoder is not None:
|
| 302 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 303 |
+
elif self.unet is not None:
|
| 304 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 305 |
+
else:
|
| 306 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 307 |
+
|
| 308 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 309 |
+
|
| 310 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 311 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 312 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 313 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 314 |
+
|
| 315 |
+
# get unconditional embeddings for classifier free guidance
|
| 316 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 317 |
+
uncond_tokens: List[str]
|
| 318 |
+
if negative_prompt is None:
|
| 319 |
+
uncond_tokens = [""] * batch_size
|
| 320 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 321 |
+
raise TypeError(
|
| 322 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 323 |
+
f" {type(prompt)}."
|
| 324 |
+
)
|
| 325 |
+
elif isinstance(negative_prompt, str):
|
| 326 |
+
uncond_tokens = [negative_prompt]
|
| 327 |
+
elif batch_size != len(negative_prompt):
|
| 328 |
+
raise ValueError(
|
| 329 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 330 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 331 |
+
" the batch size of `prompt`."
|
| 332 |
+
)
|
| 333 |
+
else:
|
| 334 |
+
uncond_tokens = negative_prompt
|
| 335 |
+
|
| 336 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 337 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 338 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 339 |
+
|
| 340 |
+
max_length = prompt_embeds.shape[1]
|
| 341 |
+
uncond_input = self.tokenizer(
|
| 342 |
+
uncond_tokens,
|
| 343 |
+
padding="max_length",
|
| 344 |
+
max_length=max_length,
|
| 345 |
+
truncation=True,
|
| 346 |
+
return_tensors="pt",
|
| 347 |
+
)
|
| 348 |
+
|
| 349 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 350 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 351 |
+
else:
|
| 352 |
+
attention_mask = None
|
| 353 |
+
|
| 354 |
+
negative_prompt_embeds = self.text_encoder(
|
| 355 |
+
uncond_input.input_ids.to(device),
|
| 356 |
+
attention_mask=attention_mask,
|
| 357 |
+
)
|
| 358 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 359 |
+
|
| 360 |
+
if do_classifier_free_guidance:
|
| 361 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 362 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 363 |
+
|
| 364 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 365 |
+
|
| 366 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 367 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 368 |
+
|
| 369 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 370 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 371 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 372 |
+
|
| 373 |
+
return prompt_embeds, negative_prompt_embeds
|
| 374 |
+
|
| 375 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 376 |
+
def encode_image(self, image, device, num_images_per_prompt):
|
| 377 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 378 |
+
|
| 379 |
+
if not isinstance(image, torch.Tensor):
|
| 380 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 381 |
+
|
| 382 |
+
image = image.to(device=device, dtype=dtype)
|
| 383 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 384 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 385 |
+
|
| 386 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 387 |
+
return image_embeds, uncond_image_embeds
|
| 388 |
+
|
| 389 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
|
| 390 |
+
def prepare_ip_adapter_image_embeds(
|
| 391 |
+
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
|
| 392 |
+
):
|
| 393 |
+
if ip_adapter_image_embeds is None:
|
| 394 |
+
if not isinstance(ip_adapter_image, list):
|
| 395 |
+
ip_adapter_image = [ip_adapter_image]
|
| 396 |
+
|
| 397 |
+
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
|
| 398 |
+
raise ValueError(
|
| 399 |
+
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
|
| 400 |
+
)
|
| 401 |
+
|
| 402 |
+
image_embeds = []
|
| 403 |
+
for single_ip_adapter_image, image_proj_layer in zip(
|
| 404 |
+
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
|
| 405 |
+
):
|
| 406 |
+
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
|
| 407 |
+
single_image_embeds, single_negative_image_embeds = self.encode_image(
|
| 408 |
+
single_ip_adapter_image, device, 1, output_hidden_state
|
| 409 |
+
)
|
| 410 |
+
single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
|
| 411 |
+
single_negative_image_embeds = torch.stack(
|
| 412 |
+
[single_negative_image_embeds] * num_images_per_prompt, dim=0
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
if self.do_classifier_free_guidance:
|
| 416 |
+
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
|
| 417 |
+
single_image_embeds = single_image_embeds.to(device)
|
| 418 |
+
|
| 419 |
+
image_embeds.append(single_image_embeds)
|
| 420 |
+
else:
|
| 421 |
+
image_embeds = ip_adapter_image_embeds
|
| 422 |
+
return image_embeds
|
| 423 |
+
|
| 424 |
+
# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
|
| 425 |
+
def decode_latents(self, latents):
|
| 426 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 427 |
+
|
| 428 |
+
batch_size, channels, num_frames, height, width = latents.shape
|
| 429 |
+
latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
|
| 430 |
+
|
| 431 |
+
image = self.vae.decode(latents).sample
|
| 432 |
+
video = (
|
| 433 |
+
image[None, :]
|
| 434 |
+
.reshape(
|
| 435 |
+
(
|
| 436 |
+
batch_size,
|
| 437 |
+
num_frames,
|
| 438 |
+
-1,
|
| 439 |
+
)
|
| 440 |
+
+ image.shape[2:]
|
| 441 |
+
)
|
| 442 |
+
.permute(0, 2, 1, 3, 4)
|
| 443 |
+
)
|
| 444 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 445 |
+
video = video.float()
|
| 446 |
+
return video
|
| 447 |
+
|
| 448 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 449 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 450 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 451 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 452 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 453 |
+
# and should be between [0, 1]
|
| 454 |
+
|
| 455 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 456 |
+
extra_step_kwargs = {}
|
| 457 |
+
if accepts_eta:
|
| 458 |
+
extra_step_kwargs["eta"] = eta
|
| 459 |
+
|
| 460 |
+
# check if the scheduler accepts generator
|
| 461 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 462 |
+
if accepts_generator:
|
| 463 |
+
extra_step_kwargs["generator"] = generator
|
| 464 |
+
return extra_step_kwargs
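As an aside, the signature-inspection trick used by `prepare_extra_step_kwargs` works on any callable; the toy `step` function below is purely illustrative and not part of the pipeline.

```py
# Standalone sketch of the inspect-based kwarg filtering used above.
# The `step` function here is a toy stand-in for `scheduler.step`.
import inspect

def step(sample, eta=0.0):
    return sample

accepts_eta = "eta" in set(inspect.signature(step).parameters.keys())
extra_step_kwargs = {"eta": 0.5} if accepts_eta else {}
print(extra_step_kwargs)  # {'eta': 0.5}
```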
|
| 465 |
+
|
| 466 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
|
| 467 |
+
def check_inputs(
|
| 468 |
+
self,
|
| 469 |
+
prompt,
|
| 470 |
+
height,
|
| 471 |
+
width,
|
| 472 |
+
num_frames,
|
| 473 |
+
callback_steps,
|
| 474 |
+
negative_prompt=None,
|
| 475 |
+
prompt_embeds=None,
|
| 476 |
+
negative_prompt_embeds=None,
|
| 477 |
+
callback_on_step_end_tensor_inputs=None,
|
| 478 |
+
image=None,
|
| 479 |
+
controlnet_conditioning_scale=1.0,
|
| 480 |
+
control_guidance_start=0.0,
|
| 481 |
+
control_guidance_end=1.0,
|
| 482 |
+
):
|
| 483 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 484 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 485 |
+
|
| 486 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 487 |
+
raise ValueError(
|
| 488 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 489 |
+
f" {type(callback_steps)}."
|
| 490 |
+
)
|
| 491 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 492 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 493 |
+
):
|
| 494 |
+
raise ValueError(
|
| 495 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 496 |
+
)
|
| 497 |
+
|
| 498 |
+
if prompt is not None and prompt_embeds is not None:
|
| 499 |
+
raise ValueError(
|
| 500 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 501 |
+
" only forward one of the two."
|
| 502 |
+
)
|
| 503 |
+
elif prompt is None and prompt_embeds is None:
|
| 504 |
+
raise ValueError(
|
| 505 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 506 |
+
)
|
| 507 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 508 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 509 |
+
|
| 510 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 511 |
+
raise ValueError(
|
| 512 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 513 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 514 |
+
)
|
| 515 |
+
|
| 516 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 517 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 518 |
+
raise ValueError(
|
| 519 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 520 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 521 |
+
f" {negative_prompt_embeds.shape}."
|
| 522 |
+
)
|
| 523 |
+
|
| 524 |
+
# `prompt` needs more sophisticated handling when there are multiple
|
| 525 |
+
# conditionings.
|
| 526 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 527 |
+
if isinstance(prompt, list):
|
| 528 |
+
logger.warning(
|
| 529 |
+
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
|
| 530 |
+
" prompts. The conditionings will be fixed across the prompts."
|
| 531 |
+
)
|
| 532 |
+
|
| 533 |
+
# Check `image`
|
| 534 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 535 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 536 |
+
)
|
| 537 |
+
if (
|
| 538 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 539 |
+
or is_compiled
|
| 540 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 541 |
+
):
|
| 542 |
+
if not isinstance(image, list):
|
| 543 |
+
raise TypeError(f"For single controlnet, `image` must be of type `list` but got {type(image)}")
|
| 544 |
+
if len(image) != num_frames:
|
| 545 |
+
raise ValueError(f"Excepted image to have length {num_frames} but got {len(image)=}")
|
| 546 |
+
elif (
|
| 547 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 548 |
+
or is_compiled
|
| 549 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 550 |
+
):
|
| 551 |
+
if not isinstance(image, list) or not isinstance(image[0], list):
|
| 552 |
+
raise TypeError(f"For multiple controlnets: `image` must be type list of lists but got {type(image)=}")
|
| 553 |
+
if len(image[0]) != num_frames:
|
| 554 |
+
raise ValueError(f"Expected length of image sublist as {num_frames} but got {len(image[0])=}")
|
| 555 |
+
if any(len(img) != len(image[0]) for img in image):
|
| 556 |
+
raise ValueError("All conditioning frame batches for multicontrolnet must be same size")
|
| 557 |
+
else:
|
| 558 |
+
assert False
|
| 559 |
+
|
| 560 |
+
# Check `controlnet_conditioning_scale`
|
| 561 |
+
if (
|
| 562 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 563 |
+
or is_compiled
|
| 564 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 565 |
+
):
|
| 566 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 567 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 568 |
+
elif (
|
| 569 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 570 |
+
or is_compiled
|
| 571 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 572 |
+
):
|
| 573 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 574 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 575 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 576 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 577 |
+
self.controlnet.nets
|
| 578 |
+
):
|
| 579 |
+
raise ValueError(
|
| 580 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 581 |
+
" the same length as the number of controlnets"
|
| 582 |
+
)
|
| 583 |
+
else:
|
| 584 |
+
assert False
|
| 585 |
+
|
| 586 |
+
if not isinstance(control_guidance_start, (tuple, list)):
|
| 587 |
+
control_guidance_start = [control_guidance_start]
|
| 588 |
+
|
| 589 |
+
if not isinstance(control_guidance_end, (tuple, list)):
|
| 590 |
+
control_guidance_end = [control_guidance_end]
|
| 591 |
+
|
| 592 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 593 |
+
raise ValueError(
|
| 594 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 595 |
+
)
|
| 596 |
+
|
| 597 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 598 |
+
if len(control_guidance_start) != len(self.controlnet.nets):
|
| 599 |
+
raise ValueError(
|
| 600 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
| 601 |
+
)
|
| 602 |
+
|
| 603 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 604 |
+
if start >= end:
|
| 605 |
+
raise ValueError(
|
| 606 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 607 |
+
)
|
| 608 |
+
if start < 0.0:
|
| 609 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 610 |
+
if end > 1.0:
|
| 611 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 612 |
+
|
| 613 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
|
| 614 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 615 |
+
image_is_pil = isinstance(image, Image.Image)
|
| 616 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 617 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 618 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image)
|
| 619 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 620 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 621 |
+
|
| 622 |
+
if (
|
| 623 |
+
not image_is_pil
|
| 624 |
+
and not image_is_tensor
|
| 625 |
+
and not image_is_np
|
| 626 |
+
and not image_is_pil_list
|
| 627 |
+
and not image_is_tensor_list
|
| 628 |
+
and not image_is_np_list
|
| 629 |
+
):
|
| 630 |
+
raise TypeError(
|
| 631 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 632 |
+
)
|
| 633 |
+
|
| 634 |
+
if image_is_pil:
|
| 635 |
+
image_batch_size = 1
|
| 636 |
+
else:
|
| 637 |
+
image_batch_size = len(image)
|
| 638 |
+
|
| 639 |
+
if prompt is not None and isinstance(prompt, str):
|
| 640 |
+
prompt_batch_size = 1
|
| 641 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 642 |
+
prompt_batch_size = len(prompt)
|
| 643 |
+
elif prompt_embeds is not None:
|
| 644 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 645 |
+
|
| 646 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 647 |
+
raise ValueError(
|
| 648 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 649 |
+
)
|
| 650 |
+
|
| 651 |
+
# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
|
| 652 |
+
def prepare_latents(
|
| 653 |
+
self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
|
| 654 |
+
):
|
| 655 |
+
shape = (
|
| 656 |
+
batch_size,
|
| 657 |
+
num_channels_latents,
|
| 658 |
+
num_frames,
|
| 659 |
+
height // self.vae_scale_factor,
|
| 660 |
+
width // self.vae_scale_factor,
|
| 661 |
+
)
|
| 662 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 663 |
+
raise ValueError(
|
| 664 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 665 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 666 |
+
)
|
| 667 |
+
|
| 668 |
+
if latents is None:
|
| 669 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 670 |
+
else:
|
| 671 |
+
latents = latents.to(device)
|
| 672 |
+
|
| 673 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 674 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 675 |
+
return latents
|
| 676 |
+
|
| 677 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
|
| 678 |
+
def prepare_image(
|
| 679 |
+
self,
|
| 680 |
+
image,
|
| 681 |
+
width,
|
| 682 |
+
height,
|
| 683 |
+
batch_size,
|
| 684 |
+
num_images_per_prompt,
|
| 685 |
+
device,
|
| 686 |
+
dtype,
|
| 687 |
+
do_classifier_free_guidance=False,
|
| 688 |
+
guess_mode=False,
|
| 689 |
+
):
|
| 690 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 691 |
+
image_batch_size = image.shape[0]
|
| 692 |
+
|
| 693 |
+
if image_batch_size == 1:
|
| 694 |
+
repeat_by = batch_size
|
| 695 |
+
else:
|
| 696 |
+
# image batch size is the same as prompt batch size
|
| 697 |
+
repeat_by = num_images_per_prompt
|
| 698 |
+
|
| 699 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 700 |
+
|
| 701 |
+
image = image.to(device=device, dtype=dtype)
|
| 702 |
+
|
| 703 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 704 |
+
image = torch.cat([image] * 2)
|
| 705 |
+
|
| 706 |
+
return image
|
| 707 |
+
|
| 708 |
+
@property
|
| 709 |
+
def guidance_scale(self):
|
| 710 |
+
return self._guidance_scale
|
| 711 |
+
|
| 712 |
+
@property
|
| 713 |
+
def clip_skip(self):
|
| 714 |
+
return self._clip_skip
|
| 715 |
+
|
| 716 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 717 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 718 |
+
# corresponds to doing no classifier free guidance.
|
| 719 |
+
@property
|
| 720 |
+
def do_classifier_free_guidance(self):
|
| 721 |
+
return self._guidance_scale > 1
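For reference, the guidance weight described in the comment above combines the unconditional and text-conditioned noise predictions exactly as done later in the denoising loop; the tensors below are random placeholders, not pipeline outputs.

```py
# Classifier-free guidance: eps = eps_uncond + w * (eps_text - eps_uncond),
# shown on random placeholder tensors (w > 1 means guidance is active).
import torch

guidance_scale = 7.5
noise_pred_uncond = torch.randn(1, 4, 16, 64, 64)
noise_pred_text = torch.randn(1, 4, 16, 64, 64)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)
```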
|
| 722 |
+
|
| 723 |
+
@property
|
| 724 |
+
def cross_attention_kwargs(self):
|
| 725 |
+
return self._cross_attention_kwargs
|
| 726 |
+
|
| 727 |
+
@property
|
| 728 |
+
def num_timesteps(self):
|
| 729 |
+
return self._num_timesteps
|
| 730 |
+
|
| 731 |
+
@torch.no_grad()
|
| 732 |
+
def __call__(
|
| 733 |
+
self,
|
| 734 |
+
prompt: Union[str, List[str]] = None,
|
| 735 |
+
num_frames: Optional[int] = 16,
|
| 736 |
+
height: Optional[int] = None,
|
| 737 |
+
width: Optional[int] = None,
|
| 738 |
+
num_inference_steps: int = 50,
|
| 739 |
+
guidance_scale: float = 7.5,
|
| 740 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 741 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 742 |
+
eta: float = 0.0,
|
| 743 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 744 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 745 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 746 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 747 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 748 |
+
ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
|
| 749 |
+
conditioning_frames: Optional[List[PipelineImageInput]] = None,
|
| 750 |
+
output_type: Optional[str] = "pil",
|
| 751 |
+
return_dict: bool = True,
|
| 752 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 753 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
| 754 |
+
guess_mode: bool = False,
|
| 755 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 756 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 757 |
+
clip_skip: Optional[int] = None,
|
| 758 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 759 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 760 |
+
**kwargs,
|
| 761 |
+
):
|
| 762 |
+
r"""
|
| 763 |
+
The call function to the pipeline for generation.
|
| 764 |
+
|
| 765 |
+
Args:
|
| 766 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 767 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 768 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 769 |
+
The height in pixels of the generated video.
|
| 770 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 771 |
+
The width in pixels of the generated video.
|
| 772 |
+
num_frames (`int`, *optional*, defaults to 16):
|
| 773 |
+
The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
|
| 774 |
+
amounts to 2 seconds of video.
|
| 775 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 776 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
|
| 777 |
+
expense of slower inference.
|
| 778 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 779 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 780 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 781 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 782 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 783 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 784 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 785 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 786 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 787 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 788 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 789 |
+
generation deterministic.
|
| 790 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 791 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
|
| 792 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 793 |
+
tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
|
| 794 |
+
`(batch_size, num_channel, num_frames, height, width)`.
|
| 795 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 796 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 797 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 798 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 799 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 800 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 801 |
+
ip_adapter_image (`PipelineImageInput`, *optional*):
|
| 802 |
+
Optional image input to work with IP Adapters.
|
| 803 |
+
ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
|
| 804 |
+
Pre-generated image embeddings for IP-Adapter. It should be a list whose length equals the number of IP-Adapters.
|
| 805 |
+
Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
|
| 806 |
+
if `do_classifier_free_guidance` is set to `True`.
|
| 807 |
+
If not provided, embeddings are computed from the `ip_adapter_image` input argument.
|
| 808 |
+
conditioning_frames (`List[PipelineImageInput]`, *optional*):
|
| 809 |
+
The ControlNet input condition to provide guidance to the `unet` for generation. If multiple ControlNets
|
| 810 |
+
are specified, images must be passed as a list such that each element of the list can be correctly
|
| 811 |
+
batched for input to a single ControlNet.
|
| 812 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 813 |
+
The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
|
| 814 |
+
`np.array`.
|
| 815 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 816 |
+
Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
|
| 817 |
+
of a plain tuple.
|
| 818 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 819 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 820 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 821 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 822 |
+
The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 823 |
+
to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
|
| 824 |
+
the corresponding scale as a list.
|
| 825 |
+
guess_mode (`bool`, *optional*, defaults to `False`):
|
| 826 |
+
The ControlNet encoder tries to recognize the content of the input image even if you remove all
|
| 827 |
+
prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
|
| 828 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 829 |
+
The percentage of total steps at which the ControlNet starts applying.
|
| 830 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 831 |
+
The percentage of total steps at which the ControlNet stops applying.
|
| 832 |
+
clip_skip (`int`, *optional*):
|
| 833 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 834 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 835 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 836 |
+
A function that is called at the end of each denoising step during inference. The function is called
|
| 837 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 838 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 839 |
+
`callback_on_step_end_tensor_inputs`.
|
| 840 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 841 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 842 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 843 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 844 |
+
|
| 845 |
+
Examples:
|
| 846 |
+
|
| 847 |
+
Returns:
|
| 848 |
+
[`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
|
| 849 |
+
If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
|
| 850 |
+
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
|
| 851 |
+
"""
|
| 852 |
+
|
| 853 |
+
callback = kwargs.pop("callback", None)
|
| 854 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 855 |
+
|
| 856 |
+
if callback is not None:
|
| 857 |
+
deprecate(
|
| 858 |
+
"callback",
|
| 859 |
+
"1.0.0",
|
| 860 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 861 |
+
)
|
| 862 |
+
if callback_steps is not None:
|
| 863 |
+
deprecate(
|
| 864 |
+
"callback_steps",
|
| 865 |
+
"1.0.0",
|
| 866 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 867 |
+
)
|
| 868 |
+
|
| 869 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 870 |
+
|
| 871 |
+
# align format for control guidance
|
| 872 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 873 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 874 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 875 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 876 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 877 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 878 |
+
control_guidance_start, control_guidance_end = (
|
| 879 |
+
mult * [control_guidance_start],
|
| 880 |
+
mult * [control_guidance_end],
|
| 881 |
+
)
|
| 882 |
+
|
| 883 |
+
# 0. Default height and width to unet
|
| 884 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 885 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 886 |
+
|
| 887 |
+
num_videos_per_prompt = 1
|
| 888 |
+
|
| 889 |
+
# 1. Check inputs. Raise error if not correct
|
| 890 |
+
self.check_inputs(
|
| 891 |
+
prompt=prompt,
|
| 892 |
+
height=height,
|
| 893 |
+
width=width,
|
| 894 |
+
num_frames=num_frames,
|
| 895 |
+
callback_steps=callback_steps,
|
| 896 |
+
negative_prompt=negative_prompt,
|
| 897 |
+
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
|
| 898 |
+
prompt_embeds=prompt_embeds,
|
| 899 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 900 |
+
image=conditioning_frames,
|
| 901 |
+
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 902 |
+
control_guidance_start=control_guidance_start,
|
| 903 |
+
control_guidance_end=control_guidance_end,
|
| 904 |
+
)
|
| 905 |
+
|
| 906 |
+
self._guidance_scale = guidance_scale
|
| 907 |
+
self._clip_skip = clip_skip
|
| 908 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 909 |
+
|
| 910 |
+
# 2. Define call parameters
|
| 911 |
+
if prompt is not None and isinstance(prompt, str):
|
| 912 |
+
batch_size = 1
|
| 913 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 914 |
+
batch_size = len(prompt)
|
| 915 |
+
else:
|
| 916 |
+
batch_size = prompt_embeds.shape[0]
|
| 917 |
+
|
| 918 |
+
device = self._execution_device
|
| 919 |
+
|
| 920 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 921 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 922 |
+
|
| 923 |
+
global_pool_conditions = (
|
| 924 |
+
controlnet.config.global_pool_conditions
|
| 925 |
+
if isinstance(controlnet, ControlNetModel)
|
| 926 |
+
else controlnet.nets[0].config.global_pool_conditions
|
| 927 |
+
)
|
| 928 |
+
guess_mode = guess_mode or global_pool_conditions
|
| 929 |
+
|
| 930 |
+
# 3. Encode input prompt
|
| 931 |
+
text_encoder_lora_scale = (
|
| 932 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 933 |
+
)
|
| 934 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 935 |
+
prompt,
|
| 936 |
+
device,
|
| 937 |
+
num_videos_per_prompt,
|
| 938 |
+
self.do_classifier_free_guidance,
|
| 939 |
+
negative_prompt,
|
| 940 |
+
prompt_embeds=prompt_embeds,
|
| 941 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 942 |
+
lora_scale=text_encoder_lora_scale,
|
| 943 |
+
clip_skip=self.clip_skip,
|
| 944 |
+
)
|
| 945 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 946 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 947 |
+
# to avoid doing two forward passes
|
| 948 |
+
if self.do_classifier_free_guidance:
|
| 949 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 950 |
+
|
| 951 |
+
if ip_adapter_image is not None:
|
| 952 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 953 |
+
ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt
|
| 954 |
+
)
|
| 955 |
+
|
| 956 |
+
if isinstance(controlnet, ControlNetModel):
|
| 957 |
+
conditioning_frames = self.prepare_image(
|
| 958 |
+
image=conditioning_frames,
|
| 959 |
+
width=width,
|
| 960 |
+
height=height,
|
| 961 |
+
batch_size=batch_size * num_videos_per_prompt * num_frames,
|
| 962 |
+
num_images_per_prompt=num_videos_per_prompt,
|
| 963 |
+
device=device,
|
| 964 |
+
dtype=controlnet.dtype,
|
| 965 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 966 |
+
guess_mode=guess_mode,
|
| 967 |
+
)
|
| 968 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 969 |
+
cond_prepared_frames = []
|
| 970 |
+
for frame_ in conditioning_frames:
|
| 971 |
+
prepared_frame = self.prepare_image(
|
| 972 |
+
image=frame_,
|
| 973 |
+
width=width,
|
| 974 |
+
height=height,
|
| 975 |
+
batch_size=batch_size * num_videos_per_prompt * num_frames,
|
| 976 |
+
num_images_per_prompt=num_videos_per_prompt,
|
| 977 |
+
device=device,
|
| 978 |
+
dtype=controlnet.dtype,
|
| 979 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 980 |
+
guess_mode=guess_mode,
|
| 981 |
+
)
|
| 982 |
+
cond_prepared_frames.append(prepared_frame)
|
| 983 |
+
conditioning_frames = cond_prepared_frames
|
| 984 |
+
else:
|
| 985 |
+
assert False
|
| 986 |
+
|
| 987 |
+
# 4. Prepare timesteps
|
| 988 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 989 |
+
timesteps = self.scheduler.timesteps
|
| 990 |
+
self._num_timesteps = len(timesteps)
|
| 991 |
+
|
| 992 |
+
# 5. Prepare latent variables
|
| 993 |
+
num_channels_latents = self.unet.config.in_channels
|
| 994 |
+
latents = self.prepare_latents(
|
| 995 |
+
batch_size * num_videos_per_prompt,
|
| 996 |
+
num_channels_latents,
|
| 997 |
+
num_frames,
|
| 998 |
+
height,
|
| 999 |
+
width,
|
| 1000 |
+
prompt_embeds.dtype,
|
| 1001 |
+
device,
|
| 1002 |
+
generator,
|
| 1003 |
+
latents,
|
| 1004 |
+
)
|
| 1005 |
+
|
| 1006 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1007 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1008 |
+
|
| 1009 |
+
# 7. Add image embeds for IP-Adapter
|
| 1010 |
+
added_cond_kwargs = (
|
| 1011 |
+
{"image_embeds": image_embeds}
|
| 1012 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None
|
| 1013 |
+
else None
|
| 1014 |
+
)
|
| 1015 |
+
|
| 1016 |
+
# 7.1 Create tensor stating which controlnets to keep
|
| 1017 |
+
controlnet_keep = []
|
| 1018 |
+
for i in range(len(timesteps)):
|
| 1019 |
+
keeps = [
|
| 1020 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 1021 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 1022 |
+
]
|
| 1023 |
+
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
|
| 1024 |
+
|
| 1025 |
+
# 8. Denoising loop
|
| 1026 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1027 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1028 |
+
for i, t in enumerate(timesteps):
|
| 1029 |
+
# expand the latents if we are doing classifier free guidance
|
| 1030 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1031 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1032 |
+
|
| 1033 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1034 |
+
# Infer ControlNet only for the conditional batch.
|
| 1035 |
+
control_model_input = latents
|
| 1036 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1037 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1038 |
+
else:
|
| 1039 |
+
control_model_input = latent_model_input
|
| 1040 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1041 |
+
controlnet_prompt_embeds = controlnet_prompt_embeds.repeat_interleave(num_frames, dim=0)
|
| 1042 |
+
|
| 1043 |
+
if isinstance(controlnet_keep[i], list):
|
| 1044 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1045 |
+
else:
|
| 1046 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1047 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1048 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1049 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1050 |
+
|
| 1051 |
+
control_model_input = torch.transpose(control_model_input, 1, 2)
|
| 1052 |
+
control_model_input = control_model_input.reshape(
|
| 1053 |
+
(-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])
|
| 1054 |
+
)
|
| 1055 |
+
|
| 1056 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1057 |
+
control_model_input,
|
| 1058 |
+
t,
|
| 1059 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1060 |
+
controlnet_cond=conditioning_frames,
|
| 1061 |
+
conditioning_scale=cond_scale,
|
| 1062 |
+
guess_mode=guess_mode,
|
| 1063 |
+
return_dict=False,
|
| 1064 |
+
)
|
| 1065 |
+
|
| 1066 |
+
# predict the noise residual
|
| 1067 |
+
noise_pred = self.unet(
|
| 1068 |
+
latent_model_input,
|
| 1069 |
+
t,
|
| 1070 |
+
encoder_hidden_states=prompt_embeds,
|
| 1071 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1072 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1073 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1074 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1075 |
+
).sample
|
| 1076 |
+
|
| 1077 |
+
# perform guidance
|
| 1078 |
+
if self.do_classifier_free_guidance:
|
| 1079 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1080 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1081 |
+
|
| 1082 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1083 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 1084 |
+
|
| 1085 |
+
if callback_on_step_end is not None:
|
| 1086 |
+
callback_kwargs = {}
|
| 1087 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1088 |
+
callback_kwargs[k] = locals()[k]
|
| 1089 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1090 |
+
|
| 1091 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1092 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1093 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1094 |
+
|
| 1095 |
+
# call the callback, if provided
|
| 1096 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1097 |
+
progress_bar.update()
|
| 1098 |
+
if callback is not None and i % callback_steps == 0:
|
| 1099 |
+
callback(i, t, latents)
|
| 1100 |
+
|
| 1101 |
+
# 9. Post processing
|
| 1102 |
+
if output_type == "latent":
|
| 1103 |
+
video = latents
|
| 1104 |
+
else:
|
| 1105 |
+
video_tensor = self.decode_latents(latents)
|
| 1106 |
+
video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
|
| 1107 |
+
|
| 1108 |
+
# 10. Offload all models
|
| 1109 |
+
self.maybe_free_model_hooks()
|
| 1110 |
+
|
| 1111 |
+
if not return_dict:
|
| 1112 |
+
return (video,)
|
| 1113 |
+
|
| 1114 |
+
return AnimateDiffPipelineOutput(frames=video)
|
v0.27.0/pipeline_animatediff_img2video.py
ADDED
|
@@ -0,0 +1,980 @@
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
#
|
| 15 |
+
# Note:
|
| 16 |
+
# This pipeline relies on a "hack" discovered by the community that allows
|
| 17 |
+
# the generation of videos given an input image with AnimateDiff. It works
|
| 18 |
+
# by creating a copy of the image `num_frames` times and progressively adding
|
| 19 |
+
# more noise to the image based on the strength and latent interpolation method.
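As a rough sketch of the idea described in the note above (not this pipeline's actual implementation), one can repeat a single image latent for every frame and blend in progressively more noise per frame; the helper name, the linear per-frame schedule, and the tensor shapes below are assumptions for illustration only.

```py
# Illustrative-only sketch of the "repeat the image latent and add noise" idea.
# `make_initial_video_latents`, the linear schedule, and the shapes are assumptions.
import torch

def make_initial_video_latents(image_latents: torch.Tensor, num_frames: int, strength: float = 0.8) -> torch.Tensor:
    # image_latents: (batch, channels, height, width) latent of the input image
    latents = image_latents[:, :, None, :, :].repeat(1, 1, num_frames, 1, 1)
    noise = torch.randn_like(latents)
    for frame in range(num_frames):
        # later frames receive more noise so that motion can emerge during denoising
        frame_strength = strength * (frame + 1) / num_frames
        latents[:, :, frame] = (1 - frame_strength) * latents[:, :, frame] + frame_strength * noise[:, :, frame]
    return latents

video_latents = make_initial_video_latents(torch.randn(1, 4, 64, 64), num_frames=16)
print(video_latents.shape)  # torch.Size([1, 4, 16, 64, 64])
```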
|
| 20 |
+
|
| 21 |
+
import inspect
|
| 22 |
+
from types import FunctionType
|
| 23 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 24 |
+
|
| 25 |
+
import numpy as np
|
| 26 |
+
import torch
|
| 27 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 28 |
+
|
| 29 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 30 |
+
from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 31 |
+
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel
|
| 32 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 33 |
+
from diffusers.models.unet_motion_model import MotionAdapter
|
| 34 |
+
from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
|
| 35 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 36 |
+
from diffusers.schedulers import (
|
| 37 |
+
DDIMScheduler,
|
| 38 |
+
DPMSolverMultistepScheduler,
|
| 39 |
+
EulerAncestralDiscreteScheduler,
|
| 40 |
+
EulerDiscreteScheduler,
|
| 41 |
+
LMSDiscreteScheduler,
|
| 42 |
+
PNDMScheduler,
|
| 43 |
+
)
|
| 44 |
+
from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
|
| 45 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 49 |
+
|
| 50 |
+
EXAMPLE_DOC_STRING = """
|
| 51 |
+
Examples:
|
| 52 |
+
```py
|
| 53 |
+
>>> import torch
|
| 54 |
+
>>> from diffusers import MotionAdapter, DiffusionPipeline, DDIMScheduler
|
| 55 |
+
>>> from diffusers.utils import export_to_gif, load_image
|
| 56 |
+
|
| 57 |
+
>>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
|
| 58 |
+
>>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
|
| 59 |
+
>>> pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter, custom_pipeline="pipeline_animatediff_img2video").to("cuda")
|
| 60 |
+
>>> pipe.scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1)
|
| 61 |
+
|
| 62 |
+
>>> image = load_image("snail.png")
|
| 63 |
+
>>> output = pipe(image=image, prompt="A snail moving on the ground", strength=0.8, latent_interpolation_method="slerp")
|
| 64 |
+
>>> frames = output.frames[0]
|
| 65 |
+
>>> export_to_gif(frames, "animation.gif")
|
| 66 |
+
```
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def lerp(
|
| 71 |
+
v0: torch.Tensor,
|
| 72 |
+
v1: torch.Tensor,
|
| 73 |
+
t: Union[float, torch.Tensor],
|
| 74 |
+
) -> torch.Tensor:
|
| 75 |
+
r"""
|
| 76 |
+
Linear Interpolation between two tensors.
|
| 77 |
+
|
| 78 |
+
Args:
|
| 79 |
+
v0 (`torch.Tensor`): First tensor.
|
| 80 |
+
v1 (`torch.Tensor`): Second tensor.
|
| 81 |
+
t: (`float` or `torch.Tensor`): Interpolation factor.
|
| 82 |
+
"""
|
| 83 |
+
t_is_float = False
|
| 84 |
+
input_device = v0.device
|
| 85 |
+
v0 = v0.cpu().numpy()
|
| 86 |
+
v1 = v1.cpu().numpy()
|
| 87 |
+
|
| 88 |
+
if isinstance(t, torch.Tensor):
|
| 89 |
+
t = t.cpu().numpy()
|
| 90 |
+
else:
|
| 91 |
+
t_is_float = True
|
| 92 |
+
t = np.array([t], dtype=v0.dtype)
|
| 93 |
+
|
| 94 |
+
t = t[..., None]
|
| 95 |
+
v0 = v0[None, ...]
|
| 96 |
+
v1 = v1[None, ...]
|
| 97 |
+
v2 = (1 - t) * v0 + t * v1
|
| 98 |
+
|
| 99 |
+
if t_is_float and v0.ndim > 1:
|
| 100 |
+
assert v2.shape[0] == 1
|
| 101 |
+
v2 = np.squeeze(v2, axis=0)
|
| 102 |
+
|
| 103 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 104 |
+
return v2
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def slerp(
|
| 108 |
+
v0: torch.Tensor,
|
| 109 |
+
v1: torch.Tensor,
|
| 110 |
+
t: Union[float, torch.Tensor],
|
| 111 |
+
DOT_THRESHOLD: float = 0.9995,
|
| 112 |
+
) -> torch.Tensor:
|
| 113 |
+
r"""
|
| 114 |
+
Spherical Linear Interpolation between two tensors.
|
| 115 |
+
|
| 116 |
+
Args:
|
| 117 |
+
v0 (`torch.Tensor`): First tensor.
|
| 118 |
+
v1 (`torch.Tensor`): Second tensor.
|
| 119 |
+
t: (`float` or `torch.Tensor`): Interpolation factor.
|
| 120 |
+
DOT_THRESHOLD (`float`):
|
| 121 |
+
Dot product threshold above which linear interpolation is used
|
| 122 |
+
because the input tensors are nearly parallel.
|
| 123 |
+
"""
|
| 124 |
+
t_is_float = False
|
| 125 |
+
input_device = v0.device
|
| 126 |
+
v0 = v0.cpu().numpy()
|
| 127 |
+
v1 = v1.cpu().numpy()
|
| 128 |
+
|
| 129 |
+
if isinstance(t, torch.Tensor):
|
| 130 |
+
t = t.cpu().numpy()
|
| 131 |
+
else:
|
| 132 |
+
t_is_float = True
|
| 133 |
+
t = np.array([t], dtype=v0.dtype)
|
| 134 |
+
|
| 135 |
+
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
| 136 |
+
|
| 137 |
+
if np.abs(dot) > DOT_THRESHOLD:
|
| 138 |
+
# v0 and v1 are close to parallel, so use linear interpolation instead
|
| 139 |
+
v2 = lerp(v0, v1, t)
|
| 140 |
+
else:
|
| 141 |
+
theta_0 = np.arccos(dot)
|
| 142 |
+
sin_theta_0 = np.sin(theta_0)
|
| 143 |
+
theta_t = theta_0 * t
|
| 144 |
+
sin_theta_t = np.sin(theta_t)
|
| 145 |
+
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
| 146 |
+
s1 = sin_theta_t / sin_theta_0
|
| 147 |
+
s0 = s0[..., None]
|
| 148 |
+
s1 = s1[..., None]
|
| 149 |
+
v0 = v0[None, ...]
|
| 150 |
+
v1 = v1[None, ...]
|
| 151 |
+
v2 = s0 * v0 + s1 * v1
|
| 152 |
+
|
| 153 |
+
if t_is_float and v0.ndim > 1:
|
| 154 |
+
assert v2.shape[0] == 1
|
| 155 |
+
v2 = np.squeeze(v2, axis=0)
|
| 156 |
+
|
| 157 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 158 |
+
return v2
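A minimal usage sketch of the two interpolation helpers defined above, on random tensors (the shapes are arbitrary and only chosen for illustration):

```py
# Hedged usage example for the lerp/slerp helpers above; shapes are arbitrary.
import torch

v0 = torch.randn(4, 64, 64)
v1 = torch.randn(4, 64, 64)

halfway_linear = lerp(v0, v1, 0.5)      # straight-line blend of the two tensors
halfway_spherical = slerp(v0, v1, 0.5)  # great-circle blend; falls back to lerp when nearly parallel
print(halfway_linear.shape, halfway_spherical.shape)
```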
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
|
| 162 |
+
def tensor2vid(video: torch.Tensor, processor, output_type="np"):
|
| 163 |
+
batch_size, channels, num_frames, height, width = video.shape
|
| 164 |
+
outputs = []
|
| 165 |
+
for batch_idx in range(batch_size):
|
| 166 |
+
batch_vid = video[batch_idx].permute(1, 0, 2, 3)
|
| 167 |
+
batch_output = processor.postprocess(batch_vid, output_type)
|
| 168 |
+
|
| 169 |
+
outputs.append(batch_output)
|
| 170 |
+
|
| 171 |
+
if output_type == "np":
|
| 172 |
+
outputs = np.stack(outputs)
|
| 173 |
+
|
| 174 |
+
elif output_type == "pt":
|
| 175 |
+
outputs = torch.stack(outputs)
|
| 176 |
+
|
| 177 |
+
elif not output_type == "pil":
|
| 178 |
+
raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
|
| 179 |
+
|
| 180 |
+
return outputs
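A hedged example of calling `tensor2vid` with a random video tensor and a default `VaeImageProcessor`; the values and shapes are placeholders standing in for decoded frames.

```py
# Illustrative call of tensor2vid; the random tensor stands in for decoded video frames.
import torch
from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor()
video = torch.rand(1, 3, 8, 64, 64)  # (batch, channels, frames, height, width)
frames = tensor2vid(video, processor, output_type="pil")
print(len(frames), len(frames[0]))  # 1 batch entry containing 8 PIL frames
```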
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
|
| 184 |
+
def retrieve_latents(
|
| 185 |
+
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
|
| 186 |
+
):
|
| 187 |
+
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
|
| 188 |
+
return encoder_output.latent_dist.sample(generator)
|
| 189 |
+
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
|
| 190 |
+
return encoder_output.latent_dist.mode()
|
| 191 |
+
elif hasattr(encoder_output, "latents"):
|
| 192 |
+
return encoder_output.latents
|
| 193 |
+
else:
|
| 194 |
+
raise AttributeError("Could not access latents of provided encoder_output")
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
|
| 198 |
+
def retrieve_timesteps(
|
| 199 |
+
scheduler,
|
| 200 |
+
num_inference_steps: Optional[int] = None,
|
| 201 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 202 |
+
timesteps: Optional[List[int]] = None,
|
| 203 |
+
**kwargs,
|
| 204 |
+
):
|
| 205 |
+
"""
|
| 206 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 207 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 208 |
+
|
| 209 |
+
Args:
|
| 210 |
+
scheduler (`SchedulerMixin`):
|
| 211 |
+
The scheduler to get timesteps from.
|
| 212 |
+
num_inference_steps (`int`):
|
| 213 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 214 |
+
`timesteps` must be `None`.
|
| 215 |
+
device (`str` or `torch.device`, *optional*):
|
| 216 |
+
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
|
| 217 |
+
timesteps (`List[int]`, *optional*):
|
| 218 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 219 |
+
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
|
| 220 |
+
must be `None`.
|
| 221 |
+
|
| 222 |
+
Returns:
|
| 223 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 224 |
+
second element is the number of inference steps.
|
| 225 |
+
"""
|
| 226 |
+
if timesteps is not None:
|
| 227 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 228 |
+
if not accepts_timesteps:
|
| 229 |
+
raise ValueError(
|
| 230 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 231 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 232 |
+
)
|
| 233 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 234 |
+
timesteps = scheduler.timesteps
|
| 235 |
+
num_inference_steps = len(timesteps)
|
| 236 |
+
else:
|
| 237 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 238 |
+
timesteps = scheduler.timesteps
|
| 239 |
+
return timesteps, num_inference_steps
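A short, hedged example of the helper above with a default `DDIMScheduler`; the scheduler choice and step count are arbitrary.

```py
# Illustrative call of retrieve_timesteps with an arbitrary scheduler.
from diffusers import DDIMScheduler

scheduler = DDIMScheduler()
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=25, device="cpu")
print(num_inference_steps, timesteps[:5])
```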
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class AnimateDiffImgToVideoPipeline(
|
| 243 |
+
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin
|
| 244 |
+
):
|
| 245 |
+
r"""
|
| 246 |
+
Pipeline for image-to-video generation.
|
| 247 |
+
|
| 248 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 249 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 250 |
+
|
| 251 |
+
The pipeline also inherits the following loading methods:
|
| 252 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 253 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 254 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 255 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 256 |
+
|
| 257 |
+
Args:
|
| 258 |
+
vae ([`AutoencoderKL`]):
|
| 259 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 260 |
+
text_encoder ([`CLIPTextModel`]):
|
| 261 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 262 |
+
tokenizer (`CLIPTokenizer`):
|
| 263 |
+
A [`~transformers.CLIPTokenizer`] to tokenize text.
|
| 264 |
+
unet ([`UNet2DConditionModel`]):
|
| 265 |
+
A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
|
| 266 |
+
motion_adapter ([`MotionAdapter`]):
|
| 267 |
+
A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
|
| 268 |
+
scheduler ([`SchedulerMixin`]):
|
| 269 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 270 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 271 |
+
"""
|
| 272 |
+
|
| 273 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 274 |
+
_optional_components = ["feature_extractor", "image_encoder"]
|
| 275 |
+
|
| 276 |
+
def __init__(
|
| 277 |
+
self,
|
| 278 |
+
vae: AutoencoderKL,
|
| 279 |
+
text_encoder: CLIPTextModel,
|
| 280 |
+
tokenizer: CLIPTokenizer,
|
| 281 |
+
unet: UNet2DConditionModel,
|
| 282 |
+
motion_adapter: MotionAdapter,
|
| 283 |
+
scheduler: Union[
|
| 284 |
+
DDIMScheduler,
|
| 285 |
+
PNDMScheduler,
|
| 286 |
+
LMSDiscreteScheduler,
|
| 287 |
+
EulerDiscreteScheduler,
|
| 288 |
+
EulerAncestralDiscreteScheduler,
|
| 289 |
+
DPMSolverMultistepScheduler,
|
| 290 |
+
],
|
| 291 |
+
feature_extractor: CLIPImageProcessor = None,
|
| 292 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 293 |
+
):
|
| 294 |
+
super().__init__()
|
| 295 |
+
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
| 296 |
+
|
| 297 |
+
self.register_modules(
|
| 298 |
+
vae=vae,
|
| 299 |
+
text_encoder=text_encoder,
|
| 300 |
+
tokenizer=tokenizer,
|
| 301 |
+
unet=unet,
|
| 302 |
+
motion_adapter=motion_adapter,
|
| 303 |
+
scheduler=scheduler,
|
| 304 |
+
feature_extractor=feature_extractor,
|
| 305 |
+
image_encoder=image_encoder,
|
| 306 |
+
)
|
| 307 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 308 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 309 |
+
|
| 310 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
| 311 |
+
def encode_prompt(
|
| 312 |
+
self,
|
| 313 |
+
prompt,
|
| 314 |
+
device,
|
| 315 |
+
num_images_per_prompt,
|
| 316 |
+
do_classifier_free_guidance,
|
| 317 |
+
negative_prompt=None,
|
| 318 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 319 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 320 |
+
lora_scale: Optional[float] = None,
|
| 321 |
+
clip_skip: Optional[int] = None,
|
| 322 |
+
):
|
| 323 |
+
r"""
|
| 324 |
+
Encodes the prompt into text encoder hidden states.
|
| 325 |
+
|
| 326 |
+
Args:
|
| 327 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 328 |
+
prompt to be encoded
|
| 329 |
+
device: (`torch.device`):
|
| 330 |
+
torch device
|
| 331 |
+
num_images_per_prompt (`int`):
|
| 332 |
+
number of images that should be generated per prompt
|
| 333 |
+
do_classifier_free_guidance (`bool`):
|
| 334 |
+
whether to use classifier free guidance or not
|
| 335 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 336 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 337 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 338 |
+
less than `1`).
|
| 339 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 340 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 341 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 342 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 343 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 344 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 345 |
+
argument.
|
| 346 |
+
lora_scale (`float`, *optional*):
|
| 347 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 348 |
+
clip_skip (`int`, *optional*):
|
| 349 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 350 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 351 |
+
"""
|
| 352 |
+
# set lora scale so that monkey patched LoRA
|
| 353 |
+
# function of text encoder can correctly access it
|
| 354 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 355 |
+
self._lora_scale = lora_scale
|
| 356 |
+
|
| 357 |
+
# dynamically adjust the LoRA scale
|
| 358 |
+
if not USE_PEFT_BACKEND:
|
| 359 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 360 |
+
else:
|
| 361 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 362 |
+
|
| 363 |
+
if prompt is not None and isinstance(prompt, str):
|
| 364 |
+
batch_size = 1
|
| 365 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 366 |
+
batch_size = len(prompt)
|
| 367 |
+
else:
|
| 368 |
+
batch_size = prompt_embeds.shape[0]
|
| 369 |
+
|
| 370 |
+
if prompt_embeds is None:
|
| 371 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 372 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 373 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 374 |
+
|
| 375 |
+
text_inputs = self.tokenizer(
|
| 376 |
+
prompt,
|
| 377 |
+
padding="max_length",
|
| 378 |
+
max_length=self.tokenizer.model_max_length,
|
| 379 |
+
truncation=True,
|
| 380 |
+
return_tensors="pt",
|
| 381 |
+
)
|
| 382 |
+
text_input_ids = text_inputs.input_ids
|
| 383 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 384 |
+
|
| 385 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 386 |
+
text_input_ids, untruncated_ids
|
| 387 |
+
):
|
| 388 |
+
removed_text = self.tokenizer.batch_decode(
|
| 389 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 390 |
+
)
|
| 391 |
+
logger.warning(
|
| 392 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 393 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 397 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 398 |
+
else:
|
| 399 |
+
attention_mask = None
|
| 400 |
+
|
| 401 |
+
if clip_skip is None:
|
| 402 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 403 |
+
prompt_embeds = prompt_embeds[0]
|
| 404 |
+
else:
|
| 405 |
+
prompt_embeds = self.text_encoder(
|
| 406 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 407 |
+
)
|
| 408 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 409 |
+
# all the hidden states from the encoder layers. Then index into
|
| 410 |
+
# the tuple to access the hidden states from the desired layer.
|
| 411 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 412 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 413 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 414 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 415 |
+
# layer.
|
| 416 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 417 |
+
|
| 418 |
+
if self.text_encoder is not None:
|
| 419 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 420 |
+
elif self.unet is not None:
|
| 421 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 422 |
+
else:
|
| 423 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 424 |
+
|
| 425 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 426 |
+
|
| 427 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 428 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 429 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 430 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 431 |
+
|
| 432 |
+
# get unconditional embeddings for classifier free guidance
|
| 433 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 434 |
+
uncond_tokens: List[str]
|
| 435 |
+
if negative_prompt is None:
|
| 436 |
+
uncond_tokens = [""] * batch_size
|
| 437 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 438 |
+
raise TypeError(
|
| 439 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 440 |
+
f" {type(prompt)}."
|
| 441 |
+
)
|
| 442 |
+
elif isinstance(negative_prompt, str):
|
| 443 |
+
uncond_tokens = [negative_prompt]
|
| 444 |
+
elif batch_size != len(negative_prompt):
|
| 445 |
+
raise ValueError(
|
| 446 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 447 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 448 |
+
" the batch size of `prompt`."
|
| 449 |
+
)
|
| 450 |
+
else:
|
| 451 |
+
uncond_tokens = negative_prompt
|
| 452 |
+
|
| 453 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 454 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 455 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 456 |
+
|
| 457 |
+
max_length = prompt_embeds.shape[1]
|
| 458 |
+
uncond_input = self.tokenizer(
|
| 459 |
+
uncond_tokens,
|
| 460 |
+
padding="max_length",
|
| 461 |
+
max_length=max_length,
|
| 462 |
+
truncation=True,
|
| 463 |
+
return_tensors="pt",
|
| 464 |
+
)
|
| 465 |
+
|
| 466 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 467 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 468 |
+
else:
|
| 469 |
+
attention_mask = None
|
| 470 |
+
|
| 471 |
+
negative_prompt_embeds = self.text_encoder(
|
| 472 |
+
uncond_input.input_ids.to(device),
|
| 473 |
+
attention_mask=attention_mask,
|
| 474 |
+
)
|
| 475 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 476 |
+
|
| 477 |
+
if do_classifier_free_guidance:
|
| 478 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 479 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 480 |
+
|
| 481 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 482 |
+
|
| 483 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 484 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 485 |
+
|
| 486 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 487 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 488 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 489 |
+
|
| 490 |
+
return prompt_embeds, negative_prompt_embeds
|
| 491 |
+
|
| 492 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 493 |
+
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
|
| 494 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 495 |
+
|
| 496 |
+
if not isinstance(image, torch.Tensor):
|
| 497 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 498 |
+
|
| 499 |
+
image = image.to(device=device, dtype=dtype)
|
| 500 |
+
if output_hidden_states:
|
| 501 |
+
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
|
| 502 |
+
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
|
| 503 |
+
uncond_image_enc_hidden_states = self.image_encoder(
|
| 504 |
+
torch.zeros_like(image), output_hidden_states=True
|
| 505 |
+
).hidden_states[-2]
|
| 506 |
+
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
|
| 507 |
+
num_images_per_prompt, dim=0
|
| 508 |
+
)
|
| 509 |
+
return image_enc_hidden_states, uncond_image_enc_hidden_states
|
| 510 |
+
else:
|
| 511 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 512 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 513 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 514 |
+
|
| 515 |
+
return image_embeds, uncond_image_embeds
|
| 516 |
+
|
| 517 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
|
| 518 |
+
def prepare_ip_adapter_image_embeds(
|
| 519 |
+
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
|
| 520 |
+
):
|
| 521 |
+
if ip_adapter_image_embeds is None:
|
| 522 |
+
if not isinstance(ip_adapter_image, list):
|
| 523 |
+
ip_adapter_image = [ip_adapter_image]
|
| 524 |
+
|
| 525 |
+
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
|
| 526 |
+
raise ValueError(
|
| 527 |
+
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
image_embeds = []
|
| 531 |
+
for single_ip_adapter_image, image_proj_layer in zip(
|
| 532 |
+
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
|
| 533 |
+
):
|
| 534 |
+
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
|
| 535 |
+
single_image_embeds, single_negative_image_embeds = self.encode_image(
|
| 536 |
+
single_ip_adapter_image, device, 1, output_hidden_state
|
| 537 |
+
)
|
| 538 |
+
single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
|
| 539 |
+
single_negative_image_embeds = torch.stack(
|
| 540 |
+
[single_negative_image_embeds] * num_images_per_prompt, dim=0
|
| 541 |
+
)
|
| 542 |
+
|
| 543 |
+
if self.do_classifier_free_guidance:
|
| 544 |
+
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
|
| 545 |
+
single_image_embeds = single_image_embeds.to(device)
|
| 546 |
+
|
| 547 |
+
image_embeds.append(single_image_embeds)
|
| 548 |
+
else:
|
| 549 |
+
image_embeds = ip_adapter_image_embeds
|
| 550 |
+
return image_embeds
|
| 551 |
+
|
| 552 |
+
# Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
|
| 553 |
+
def decode_latents(self, latents):
|
| 554 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 555 |
+
|
| 556 |
+
batch_size, channels, num_frames, height, width = latents.shape
|
| 557 |
+
latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
|
| 558 |
+
|
| 559 |
+
image = self.vae.decode(latents).sample
|
| 560 |
+
video = (
|
| 561 |
+
image[None, :]
|
| 562 |
+
.reshape(
|
| 563 |
+
(
|
| 564 |
+
batch_size,
|
| 565 |
+
num_frames,
|
| 566 |
+
-1,
|
| 567 |
+
)
|
| 568 |
+
+ image.shape[2:]
|
| 569 |
+
)
|
| 570 |
+
.permute(0, 2, 1, 3, 4)
|
| 571 |
+
)
|
| 572 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 573 |
+
video = video.float()
|
| 574 |
+
return video
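The reshapes above fold the frame axis into the batch axis so the 2D VAE can decode all frames in a single call, then unfold the result back into a `(batch, channels, frames, height, width)` video tensor. A minimal, self-contained sketch of just this shape bookkeeping, with a random tensor standing in for the VAE output (the 8x spatial upscale is the usual SD VAE assumption), is shown below.

```py
import torch

batch, channels, num_frames, h, w = 2, 4, 16, 64, 64
latents = torch.randn(batch, channels, num_frames, h, w)

# Fold frames into the batch dimension: (B, C, F, H, W) -> (B*F, C, H, W)
flat = latents.permute(0, 2, 1, 3, 4).reshape(batch * num_frames, channels, h, w)

# Stand-in for self.vae.decode(flat).sample, which returns RGB images per frame.
decoded = torch.randn(batch * num_frames, 3, h * 8, w * 8)

# Unfold back into a video tensor: (B, C', F, H', W')
video = decoded[None, :].reshape((batch, num_frames, -1) + decoded.shape[2:]).permute(0, 2, 1, 3, 4)
print(video.shape)  # torch.Size([2, 3, 16, 512, 512])
```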
|
| 575 |
+
|
| 576 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 577 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 578 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 579 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 580 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 581 |
+
# and should be between [0, 1]
|
| 582 |
+
|
| 583 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 584 |
+
extra_step_kwargs = {}
|
| 585 |
+
if accepts_eta:
|
| 586 |
+
extra_step_kwargs["eta"] = eta
|
| 587 |
+
|
| 588 |
+
# check if the scheduler accepts generator
|
| 589 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 590 |
+
if accepts_generator:
|
| 591 |
+
extra_step_kwargs["generator"] = generator
|
| 592 |
+
return extra_step_kwargs
|
| 593 |
+
|
| 594 |
+
def check_inputs(
|
| 595 |
+
self,
|
| 596 |
+
prompt,
|
| 597 |
+
height,
|
| 598 |
+
width,
|
| 599 |
+
callback_steps,
|
| 600 |
+
negative_prompt=None,
|
| 601 |
+
prompt_embeds=None,
|
| 602 |
+
negative_prompt_embeds=None,
|
| 603 |
+
callback_on_step_end_tensor_inputs=None,
|
| 604 |
+
latent_interpolation_method=None,
|
| 605 |
+
):
|
| 606 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 607 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 608 |
+
|
| 609 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 610 |
+
raise ValueError(
|
| 611 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 612 |
+
f" {type(callback_steps)}."
|
| 613 |
+
)
|
| 614 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 615 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 616 |
+
):
|
| 617 |
+
raise ValueError(
|
| 618 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 619 |
+
)
|
| 620 |
+
|
| 621 |
+
if prompt is not None and prompt_embeds is not None:
|
| 622 |
+
raise ValueError(
|
| 623 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 624 |
+
" only forward one of the two."
|
| 625 |
+
)
|
| 626 |
+
elif prompt is None and prompt_embeds is None:
|
| 627 |
+
raise ValueError(
|
| 628 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 629 |
+
)
|
| 630 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 631 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 632 |
+
|
| 633 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 634 |
+
raise ValueError(
|
| 635 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 636 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 637 |
+
)
|
| 638 |
+
|
| 639 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 640 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 641 |
+
raise ValueError(
|
| 642 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 643 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 644 |
+
f" {negative_prompt_embeds.shape}."
|
| 645 |
+
)
|
| 646 |
+
|
| 647 |
+
if latent_interpolation_method is not None:
|
| 648 |
+
if latent_interpolation_method not in ["lerp", "slerp"] and not isinstance(
|
| 649 |
+
latent_interpolation_method, FunctionType
|
| 650 |
+
):
|
| 651 |
+
raise ValueError(
|
| 652 |
+
"`latent_interpolation_method` must be one of `lerp`, `slerp` or a Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]"
|
| 653 |
+
)
|
| 654 |
+
|
| 655 |
+
def prepare_latents(
|
| 656 |
+
self,
|
| 657 |
+
image,
|
| 658 |
+
strength,
|
| 659 |
+
batch_size,
|
| 660 |
+
num_channels_latents,
|
| 661 |
+
num_frames,
|
| 662 |
+
height,
|
| 663 |
+
width,
|
| 664 |
+
dtype,
|
| 665 |
+
device,
|
| 666 |
+
generator,
|
| 667 |
+
latents=None,
|
| 668 |
+
latent_interpolation_method="slerp",
|
| 669 |
+
):
|
| 670 |
+
shape = (
|
| 671 |
+
batch_size,
|
| 672 |
+
num_channels_latents,
|
| 673 |
+
num_frames,
|
| 674 |
+
height // self.vae_scale_factor,
|
| 675 |
+
width // self.vae_scale_factor,
|
| 676 |
+
)
|
| 677 |
+
|
| 678 |
+
if latents is None:
|
| 679 |
+
image = image.to(device=device, dtype=dtype)
|
| 680 |
+
|
| 681 |
+
if image.shape[1] == 4:
|
| 682 |
+
latents = image
|
| 683 |
+
else:
|
| 684 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 685 |
+
if self.vae.config.force_upcast:
|
| 686 |
+
image = image.float()
|
| 687 |
+
self.vae.to(dtype=torch.float32)
|
| 688 |
+
|
| 689 |
+
if isinstance(generator, list):
|
| 690 |
+
if len(generator) != batch_size:
|
| 691 |
+
raise ValueError(
|
| 692 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 693 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
init_latents = [
|
| 697 |
+
retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
|
| 698 |
+
for i in range(batch_size)
|
| 699 |
+
]
|
| 700 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 701 |
+
else:
|
| 702 |
+
init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
|
| 703 |
+
|
| 704 |
+
if self.vae.config.force_upcast:
|
| 705 |
+
self.vae.to(dtype)
|
| 706 |
+
|
| 707 |
+
init_latents = init_latents.to(dtype)
|
| 708 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 709 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 710 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 711 |
+
|
| 712 |
+
if latent_interpolation_method == "lerp":
|
| 713 |
+
|
| 714 |
+
def latent_cls(v0, v1, index):
|
| 715 |
+
return lerp(v0, v1, index / num_frames * (1 - strength))
|
| 716 |
+
elif latent_interpolation_method == "slerp":
|
| 717 |
+
|
| 718 |
+
def latent_cls(v0, v1, index):
|
| 719 |
+
return slerp(v0, v1, index / num_frames * (1 - strength))
|
| 720 |
+
else:
|
| 721 |
+
latent_cls = latent_interpolation_method
|
| 722 |
+
|
| 723 |
+
for i in range(num_frames):
|
| 724 |
+
latents[:, :, i, :, :] = latent_cls(latents[:, :, i, :, :], init_latents, i)
|
| 725 |
+
else:
|
| 726 |
+
if shape != latents.shape:
|
| 727 |
+
# [B, C, F, H, W]
|
| 728 |
+
raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}")
|
| 729 |
+
latents = latents.to(device, dtype=dtype)
|
| 730 |
+
|
| 731 |
+
return latents
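The loop above initializes each frame from random noise and pulls it toward the encoded image latent with a weight of `index / num_frames * (1 - strength)`: later frames start closer to the image latent, and a higher `strength` keeps every frame closer to pure noise. A minimal, self-contained sketch of the `lerp` branch (the `slerp` helper defined elsewhere in this file uses the same indexing) is shown below.

```py
import torch

# Toy latent shapes: (batch, channels, frames, height // 8, width // 8)
batch, channels, num_frames, h, w = 1, 4, 16, 64, 64
strength = 0.8

noise = torch.randn(batch, channels, num_frames, h, w)  # random per-frame latents
image_latent = torch.randn(batch, channels, h, w)       # stands in for the VAE-encoded input image

latents = noise.clone()
for i in range(num_frames):
    weight = i / num_frames * (1 - strength)  # 0 at frame 0, growing linearly with the frame index
    latents[:, :, i] = torch.lerp(latents[:, :, i], image_latent, weight)
```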
|
| 732 |
+
|
| 733 |
+
@torch.no_grad()
|
| 734 |
+
def __call__(
|
| 735 |
+
self,
|
| 736 |
+
image: PipelineImageInput,
|
| 737 |
+
prompt: Optional[Union[str, List[str]]] = None,
|
| 738 |
+
height: Optional[int] = None,
|
| 739 |
+
width: Optional[int] = None,
|
| 740 |
+
num_frames: int = 16,
|
| 741 |
+
num_inference_steps: int = 50,
|
| 742 |
+
timesteps: Optional[List[int]] = None,
|
| 743 |
+
guidance_scale: float = 7.5,
|
| 744 |
+
strength: float = 0.8,
|
| 745 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 746 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 747 |
+
eta: float = 0.0,
|
| 748 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 749 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 750 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 751 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 752 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 753 |
+
ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
|
| 754 |
+
output_type: Optional[str] = "pil",
|
| 755 |
+
return_dict: bool = True,
|
| 756 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 757 |
+
callback_steps: Optional[int] = 1,
|
| 758 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 759 |
+
clip_skip: Optional[int] = None,
|
| 760 |
+
latent_interpolation_method: Union[str, Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]] = "slerp",
|
| 761 |
+
):
|
| 762 |
+
r"""
|
| 763 |
+
The call function to the pipeline for generation.
|
| 764 |
+
|
| 765 |
+
Args:
|
| 766 |
+
image (`PipelineImageInput`):
|
| 767 |
+
The input image to condition the generation on.
|
| 768 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 769 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 770 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 771 |
+
The height in pixels of the generated video.
|
| 772 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 773 |
+
The width in pixels of the generated video.
|
| 774 |
+
num_frames (`int`, *optional*, defaults to 16):
|
| 775 |
+
The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
|
| 776 |
+
amounts to 2 seconds of video.
|
| 777 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 778 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality video at the
|
| 779 |
+
expense of slower inference.
|
| 780 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 781 |
+
Higher strength leads to more differences between the original image and the generated video.
|
| 782 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 783 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 784 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 785 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 786 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 787 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 788 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 789 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 790 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 791 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 792 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 793 |
+
generation deterministic.
|
| 794 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 795 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
|
| 796 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 797 |
+
tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
|
| 798 |
+
`(batch_size, num_channel, num_frames, height, width)`.
|
| 799 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 800 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 801 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 802 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 803 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 804 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 805 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*):
|
| 806 |
+
Optional image input to work with IP Adapters.
|
| 807 |
+
ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
|
| 808 |
+
Pre-generated image embeddings for IP-Adapter. It should be a list whose length equals the number of IP-Adapters.
|
| 809 |
+
Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
|
| 810 |
+
if `do_classifier_free_guidance` is set to `True`.
|
| 811 |
+
If not provided, embeddings are computed from the `ip_adapter_image` input argument.
|
| 812 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 813 |
+
The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
|
| 814 |
+
`np.array`.
|
| 815 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 816 |
+
Whether or not to return an [`AnimateDiffImgToVideoPipelineOutput`] instead
|
| 817 |
+
of a plain tuple.
|
| 818 |
+
callback (`Callable`, *optional*):
|
| 819 |
+
A function that will be called every `callback_steps` steps during inference. The function is called with the
|
| 820 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 821 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 822 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 823 |
+
every step.
|
| 824 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 825 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 826 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 827 |
+
clip_skip (`int`, *optional*):
|
| 828 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 829 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 830 |
+
latent_interpolation_method (`str` or `Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]]`, *optional*):
|
| 831 |
+
Must be one of "lerp", "slerp" or a callable that takes in a random noisy latent, image latent and a frame index
|
| 832 |
+
as input and returns an initial latent for sampling.
|
| 833 |
+
Examples:
|
| 834 |
+
|
| 835 |
+
Returns:
|
| 836 |
+
[`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
|
| 837 |
+
If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
|
| 838 |
+
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
|
| 839 |
+
"""
|
| 840 |
+
# 0. Default height and width to unet
|
| 841 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 842 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 843 |
+
|
| 844 |
+
num_videos_per_prompt = 1
|
| 845 |
+
|
| 846 |
+
# 1. Check inputs. Raise error if not correct
|
| 847 |
+
self.check_inputs(
|
| 848 |
+
prompt=prompt,
|
| 849 |
+
height=height,
|
| 850 |
+
width=width,
|
| 851 |
+
callback_steps=callback_steps,
|
| 852 |
+
negative_prompt=negative_prompt,
|
| 853 |
+
prompt_embeds=prompt_embeds,
|
| 854 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 855 |
+
latent_interpolation_method=latent_interpolation_method,
|
| 856 |
+
)
|
| 857 |
+
|
| 858 |
+
# 2. Define call parameters
|
| 859 |
+
if prompt is not None and isinstance(prompt, str):
|
| 860 |
+
batch_size = 1
|
| 861 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 862 |
+
batch_size = len(prompt)
|
| 863 |
+
else:
|
| 864 |
+
batch_size = prompt_embeds.shape[0]
|
| 865 |
+
|
| 866 |
+
device = self._execution_device
|
| 867 |
+
|
| 868 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
| 869 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 870 |
+
# corresponds to doing no classifier free guidance.
|
| 871 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 872 |
+
|
| 873 |
+
# 3. Encode input prompt
|
| 874 |
+
text_encoder_lora_scale = (
|
| 875 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 876 |
+
)
|
| 877 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 878 |
+
prompt,
|
| 879 |
+
device,
|
| 880 |
+
num_videos_per_prompt,
|
| 881 |
+
do_classifier_free_guidance,
|
| 882 |
+
negative_prompt,
|
| 883 |
+
prompt_embeds=prompt_embeds,
|
| 884 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 885 |
+
lora_scale=text_encoder_lora_scale,
|
| 886 |
+
clip_skip=clip_skip,
|
| 887 |
+
)
|
| 888 |
+
|
| 889 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 890 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 891 |
+
# to avoid doing two forward passes
|
| 892 |
+
if do_classifier_free_guidance:
|
| 893 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 894 |
+
|
| 895 |
+
if ip_adapter_image is not None:
|
| 896 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 897 |
+
ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt
|
| 898 |
+
)
|
| 899 |
+
|
| 900 |
+
# 4. Preprocess image
|
| 901 |
+
image = self.image_processor.preprocess(image, height=height, width=width)
|
| 902 |
+
|
| 903 |
+
# 5. Prepare timesteps
|
| 904 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 905 |
+
|
| 906 |
+
# 6. Prepare latent variables
|
| 907 |
+
num_channels_latents = self.unet.config.in_channels
|
| 908 |
+
latents = self.prepare_latents(
|
| 909 |
+
image=image,
|
| 910 |
+
strength=strength,
|
| 911 |
+
batch_size=batch_size * num_videos_per_prompt,
|
| 912 |
+
num_channels_latents=num_channels_latents,
|
| 913 |
+
num_frames=num_frames,
|
| 914 |
+
height=height,
|
| 915 |
+
width=width,
|
| 916 |
+
dtype=prompt_embeds.dtype,
|
| 917 |
+
device=device,
|
| 918 |
+
generator=generator,
|
| 919 |
+
latents=latents,
|
| 920 |
+
latent_interpolation_method=latent_interpolation_method,
|
| 921 |
+
)
|
| 922 |
+
|
| 923 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 924 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 925 |
+
|
| 926 |
+
# 8. Add image embeds for IP-Adapter
|
| 927 |
+
added_cond_kwargs = (
|
| 928 |
+
{"image_embeds": image_embeds}
|
| 929 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None
|
| 930 |
+
else None
|
| 931 |
+
)
|
| 932 |
+
|
| 933 |
+
# 9. Denoising loop
|
| 934 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 935 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 936 |
+
for i, t in enumerate(timesteps):
|
| 937 |
+
# expand the latents if we are doing classifier free guidance
|
| 938 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 939 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 940 |
+
|
| 941 |
+
# predict the noise residual
|
| 942 |
+
noise_pred = self.unet(
|
| 943 |
+
latent_model_input,
|
| 944 |
+
t,
|
| 945 |
+
encoder_hidden_states=prompt_embeds,
|
| 946 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 947 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 948 |
+
).sample
|
| 949 |
+
|
| 950 |
+
# perform guidance
|
| 951 |
+
if do_classifier_free_guidance:
|
| 952 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 953 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 954 |
+
|
| 955 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 956 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 957 |
+
|
| 958 |
+
# call the callback, if provided
|
| 959 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 960 |
+
progress_bar.update()
|
| 961 |
+
if callback is not None and i % callback_steps == 0:
|
| 962 |
+
callback(i, t, latents)
|
| 963 |
+
|
|
| 966 |
+
|
| 967 |
+
# 10. Post-processing
|
| 968 |
+
if output_type == "latent":
|
| 969 |
+
video = latents
|
| 970 |
+
else:
|
| 971 |
+
video_tensor = self.decode_latents(latents)
|
| 972 |
+
video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
|
| 973 |
+
|
| 974 |
+
# 11. Offload all models
|
| 975 |
+
self.maybe_free_model_hooks()
|
| 976 |
+
|
| 977 |
+
if not return_dict:
|
| 978 |
+
return (video,)
|
| 979 |
+
|
| 980 |
+
return AnimateDiffPipelineOutput(frames=video)
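With the full `__call__` implementation in place, a hedged end-to-end usage sketch follows; it assumes `pipe` was loaded as in the earlier loading sketch and uses a placeholder image URL. It also shows that `latent_interpolation_method` accepts a custom callable with the `(noisy_latent, image_latent, frame_index)` signature described in the docstring.

```py
import torch
from diffusers.utils import export_to_gif, load_image

image = load_image("https://example.com/input.png")  # placeholder URL

def keep_first_frame(noise, image_latent, index):
    # Custom interpolation: lock frame 0 to the image latent, leave the rest as noise.
    return image_latent if index == 0 else noise

output = pipe(  # `pipe` assumed to be the community pipeline loaded earlier
    image=image,
    prompt="a scenic landscape, gentle camera pan",
    negative_prompt="low quality, blurry",
    num_frames=16,
    num_inference_steps=25,
    guidance_scale=7.5,
    strength=0.6,
    latent_interpolation_method="slerp",  # or keep_first_frame
    generator=torch.Generator("cpu").manual_seed(0),
)
frames = output.frames[0]  # list of PIL images when output_type="pil"
export_to_gif(frames, "animation.gif")
```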
|
v0.27.0/pipeline_demofusion_sdxl.py
ADDED
|
@@ -0,0 +1,1383 @@
|
| 1 |
+
import inspect
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import warnings
|
| 5 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 6 |
+
|
| 7 |
+
import matplotlib.pyplot as plt
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
| 11 |
+
|
| 12 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 13 |
+
from diffusers.loaders import (
|
| 14 |
+
FromSingleFileMixin,
|
| 15 |
+
LoraLoaderMixin,
|
| 16 |
+
TextualInversionLoaderMixin,
|
| 17 |
+
)
|
| 18 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 19 |
+
from diffusers.models.attention_processor import (
|
| 20 |
+
AttnProcessor2_0,
|
| 21 |
+
LoRAAttnProcessor2_0,
|
| 22 |
+
LoRAXFormersAttnProcessor,
|
| 23 |
+
XFormersAttnProcessor,
|
| 24 |
+
)
|
| 25 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 26 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 27 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 28 |
+
from diffusers.utils import (
|
| 29 |
+
is_accelerate_available,
|
| 30 |
+
is_accelerate_version,
|
| 31 |
+
is_invisible_watermark_available,
|
| 32 |
+
logging,
|
| 33 |
+
replace_example_docstring,
|
| 34 |
+
)
|
| 35 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
if is_invisible_watermark_available():
|
| 39 |
+
from diffusers.pipelines.stable_diffusion_xl.watermark import (
|
| 40 |
+
StableDiffusionXLWatermarker,
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 45 |
+
|
| 46 |
+
EXAMPLE_DOC_STRING = """
|
| 47 |
+
Examples:
|
| 48 |
+
```py
|
| 49 |
+
>>> import torch
|
| 50 |
+
>>> from diffusers import StableDiffusionXLPipeline
|
| 51 |
+
|
| 52 |
+
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 53 |
+
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
| 54 |
+
... )
|
| 55 |
+
>>> pipe = pipe.to("cuda")
|
| 56 |
+
|
| 57 |
+
>>> prompt = "a photo of an astronaut riding a horse on mars"
|
| 58 |
+
>>> image = pipe(prompt).images[0]
|
| 59 |
+
```
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
|
| 64 |
+
x_coord = torch.arange(kernel_size)
|
| 65 |
+
gaussian_1d = torch.exp(-((x_coord - (kernel_size - 1) / 2) ** 2) / (2 * sigma**2))
|
| 66 |
+
gaussian_1d = gaussian_1d / gaussian_1d.sum()
|
| 67 |
+
gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
|
| 68 |
+
kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)
|
| 69 |
+
|
| 70 |
+
return kernel
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def gaussian_filter(latents, kernel_size=3, sigma=1.0):
|
| 74 |
+
channels = latents.shape[1]
|
| 75 |
+
kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
|
| 76 |
+
blurred_latents = F.conv2d(latents, kernel, padding=kernel_size // 2, groups=channels)
|
| 77 |
+
|
| 78 |
+
return blurred_latents
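The two helpers above build a per-channel (depthwise) Gaussian kernel and apply it with a grouped convolution, so each latent channel is blurred independently while the spatial dimensions are preserved by the padding. A minimal usage sketch, assuming the `gaussian_filter` defined above is in scope, is shown below.

```py
import torch

# Assumes gaussian_filter from this file is importable / in scope.
latents = torch.randn(1, 4, 128, 128)  # (batch, latent channels, H // 8, W // 8)
smoothed = gaussian_filter(latents, kernel_size=3, sigma=1.0)
print(latents.shape, smoothed.shape)   # both torch.Size([1, 4, 128, 128])
```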
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
|
| 82 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 83 |
+
"""
|
| 84 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 85 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 86 |
+
"""
|
| 87 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 88 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 89 |
+
# rescale the results from guidance (fixes overexposure)
|
| 90 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 91 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 92 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 93 |
+
return noise_cfg
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class DemoFusionSDXLPipeline(
|
| 97 |
+
DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 98 |
+
):
|
| 99 |
+
r"""
|
| 100 |
+
Pipeline for text-to-image generation using Stable Diffusion XL.
|
| 101 |
+
|
| 102 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 103 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 104 |
+
|
| 105 |
+
In addition the pipeline inherits the following loading methods:
|
| 106 |
+
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
|
| 107 |
+
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
|
| 108 |
+
|
| 109 |
+
as well as the following saving methods:
|
| 110 |
+
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
|
| 111 |
+
|
| 112 |
+
Args:
|
| 113 |
+
vae ([`AutoencoderKL`]):
|
| 114 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 115 |
+
text_encoder ([`CLIPTextModel`]):
|
| 116 |
+
Frozen text-encoder. Stable Diffusion XL uses the text portion of
|
| 117 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 118 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 119 |
+
text_encoder_2 ([`CLIPTextModelWithProjection`]):
|
| 120 |
+
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
|
| 121 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
|
| 122 |
+
specifically the
|
| 123 |
+
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
|
| 124 |
+
variant.
|
| 125 |
+
tokenizer (`CLIPTokenizer`):
|
| 126 |
+
Tokenizer of class
|
| 127 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 128 |
+
tokenizer_2 (`CLIPTokenizer`):
|
| 129 |
+
Second Tokenizer of class
|
| 130 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 131 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 132 |
+
scheduler ([`SchedulerMixin`]):
|
| 133 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 134 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 135 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
| 136 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
| 137 |
+
`stabilityai/stable-diffusion-xl-base-1.0`.
|
| 138 |
+
add_watermarker (`bool`, *optional*):
|
| 139 |
+
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
|
| 140 |
+
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
|
| 141 |
+
watermarker will be used.
|
| 142 |
+
"""
|
| 143 |
+
|
| 144 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
|
| 145 |
+
|
| 146 |
+
def __init__(
|
| 147 |
+
self,
|
| 148 |
+
vae: AutoencoderKL,
|
| 149 |
+
text_encoder: CLIPTextModel,
|
| 150 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
| 151 |
+
tokenizer: CLIPTokenizer,
|
| 152 |
+
tokenizer_2: CLIPTokenizer,
|
| 153 |
+
unet: UNet2DConditionModel,
|
| 154 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 155 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 156 |
+
add_watermarker: Optional[bool] = None,
|
| 157 |
+
):
|
| 158 |
+
super().__init__()
|
| 159 |
+
|
| 160 |
+
self.register_modules(
|
| 161 |
+
vae=vae,
|
| 162 |
+
text_encoder=text_encoder,
|
| 163 |
+
text_encoder_2=text_encoder_2,
|
| 164 |
+
tokenizer=tokenizer,
|
| 165 |
+
tokenizer_2=tokenizer_2,
|
| 166 |
+
unet=unet,
|
| 167 |
+
scheduler=scheduler,
|
| 168 |
+
)
|
| 169 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 170 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 171 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 172 |
+
self.default_sample_size = self.unet.config.sample_size
|
| 173 |
+
|
| 174 |
+
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
| 175 |
+
|
| 176 |
+
if add_watermarker:
|
| 177 |
+
self.watermark = StableDiffusionXLWatermarker()
|
| 178 |
+
else:
|
| 179 |
+
self.watermark = None
|
| 180 |
+
|
| 181 |
+
def encode_prompt(
|
| 182 |
+
self,
|
| 183 |
+
prompt: str,
|
| 184 |
+
prompt_2: Optional[str] = None,
|
| 185 |
+
device: Optional[torch.device] = None,
|
| 186 |
+
num_images_per_prompt: int = 1,
|
| 187 |
+
do_classifier_free_guidance: bool = True,
|
| 188 |
+
negative_prompt: Optional[str] = None,
|
| 189 |
+
negative_prompt_2: Optional[str] = None,
|
| 190 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 191 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 192 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 193 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 194 |
+
lora_scale: Optional[float] = None,
|
| 195 |
+
):
|
| 196 |
+
r"""
|
| 197 |
+
Encodes the prompt into text encoder hidden states.
|
| 198 |
+
|
| 199 |
+
Args:
|
| 200 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 201 |
+
prompt to be encoded
|
| 202 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 203 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 204 |
+
used in both text-encoders
|
| 205 |
+
device: (`torch.device`):
|
| 206 |
+
torch device
|
| 207 |
+
num_images_per_prompt (`int`):
|
| 208 |
+
number of images that should be generated per prompt
|
| 209 |
+
do_classifier_free_guidance (`bool`):
|
| 210 |
+
whether to use classifier free guidance or not
|
| 211 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 212 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 213 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 214 |
+
less than `1`).
|
| 215 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 216 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 217 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 218 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 219 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 220 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 221 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 222 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 223 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 224 |
+
argument.
|
| 225 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 226 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 227 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 228 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 229 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 230 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 231 |
+
input argument.
|
| 232 |
+
lora_scale (`float`, *optional*):
|
| 233 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 234 |
+
"""
|
| 235 |
+
device = device or self._execution_device
|
| 236 |
+
|
| 237 |
+
# set lora scale so that monkey patched LoRA
|
| 238 |
+
# function of text encoder can correctly access it
|
| 239 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 240 |
+
self._lora_scale = lora_scale
|
| 241 |
+
|
| 242 |
+
# dynamically adjust the LoRA scale
|
| 243 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 244 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 245 |
+
|
| 246 |
+
if prompt is not None and isinstance(prompt, str):
|
| 247 |
+
batch_size = 1
|
| 248 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 249 |
+
batch_size = len(prompt)
|
| 250 |
+
else:
|
| 251 |
+
batch_size = prompt_embeds.shape[0]
|
| 252 |
+
|
| 253 |
+
# Define tokenizers and text encoders
|
| 254 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 255 |
+
text_encoders = (
|
| 256 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
if prompt_embeds is None:
|
| 260 |
+
prompt_2 = prompt_2 or prompt
|
| 261 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 262 |
+
prompt_embeds_list = []
|
| 263 |
+
prompts = [prompt, prompt_2]
|
| 264 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 265 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 266 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 267 |
+
|
| 268 |
+
text_inputs = tokenizer(
|
| 269 |
+
prompt,
|
| 270 |
+
padding="max_length",
|
| 271 |
+
max_length=tokenizer.model_max_length,
|
| 272 |
+
truncation=True,
|
| 273 |
+
return_tensors="pt",
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
text_input_ids = text_inputs.input_ids
|
| 277 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 278 |
+
|
| 279 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 280 |
+
text_input_ids, untruncated_ids
|
| 281 |
+
):
|
| 282 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 283 |
+
logger.warning(
|
| 284 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 285 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 286 |
+
)
|
| 287 |
+
|
| 288 |
+
prompt_embeds = text_encoder(
|
| 289 |
+
text_input_ids.to(device),
|
| 290 |
+
output_hidden_states=True,
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
# We are always only interested in the pooled output of the final text encoder
|
| 294 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 295 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 296 |
+
|
| 297 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 298 |
+
|
| 299 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 300 |
+
|
| 301 |
+
# get unconditional embeddings for classifier free guidance
|
| 302 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 303 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 304 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 305 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 306 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 307 |
+
negative_prompt = negative_prompt or ""
|
| 308 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 309 |
+
|
| 310 |
+
uncond_tokens: List[str]
|
| 311 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 312 |
+
raise TypeError(
|
| 313 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 314 |
+
f" {type(prompt)}."
|
| 315 |
+
)
|
| 316 |
+
elif isinstance(negative_prompt, str):
|
| 317 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 318 |
+
elif batch_size != len(negative_prompt):
|
| 319 |
+
raise ValueError(
|
| 320 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 321 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 322 |
+
" the batch size of `prompt`."
|
| 323 |
+
)
|
| 324 |
+
else:
|
| 325 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 326 |
+
|
| 327 |
+
negative_prompt_embeds_list = []
|
| 328 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 329 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 330 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 331 |
+
|
| 332 |
+
max_length = prompt_embeds.shape[1]
|
| 333 |
+
uncond_input = tokenizer(
|
| 334 |
+
negative_prompt,
|
| 335 |
+
padding="max_length",
|
| 336 |
+
max_length=max_length,
|
| 337 |
+
truncation=True,
|
| 338 |
+
return_tensors="pt",
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
negative_prompt_embeds = text_encoder(
|
| 342 |
+
uncond_input.input_ids.to(device),
|
| 343 |
+
output_hidden_states=True,
|
| 344 |
+
)
|
| 345 |
+
# We are always only interested in the pooled output of the final text encoder
|
| 346 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 347 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 348 |
+
|
| 349 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 350 |
+
|
| 351 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 352 |
+
|
| 353 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 354 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 355 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 356 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 357 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 358 |
+
|
| 359 |
+
if do_classifier_free_guidance:
|
| 360 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 361 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 362 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 363 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 364 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 365 |
+
|
| 366 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 367 |
+
bs_embed * num_images_per_prompt, -1
|
| 368 |
+
)
|
| 369 |
+
if do_classifier_free_guidance:
|
| 370 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 371 |
+
bs_embed * num_images_per_prompt, -1
|
| 372 |
+
)
|
| 373 |
+
|
| 374 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 375 |
+
|
| 376 |
+
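As a quick, illustrative sanity check of the shapes `encode_prompt` produces when it concatenates the penultimate hidden states of the two text encoders along the last dimension (the hidden sizes 768 and 1280 below are the usual SDXL values and are an assumption here, not read from this file):

import torch

# penultimate hidden states from the two text encoders (batch of 1, 77 tokens)
emb_1 = torch.randn(1, 77, 768)    # e.g. CLIP ViT-L
emb_2 = torch.randn(1, 77, 1280)   # e.g. OpenCLIP ViT-bigG
prompt_embeds = torch.concat([emb_1, emb_2], dim=-1)
print(prompt_embeds.shape)  # torch.Size([1, 77, 2048])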
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 377 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 378 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 379 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 380 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 381 |
+
# and should be between [0, 1]
|
| 382 |
+
|
| 383 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 384 |
+
extra_step_kwargs = {}
|
| 385 |
+
if accepts_eta:
|
| 386 |
+
extra_step_kwargs["eta"] = eta
|
| 387 |
+
|
| 388 |
+
# check if the scheduler accepts generator
|
| 389 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 390 |
+
if accepts_generator:
|
| 391 |
+
extra_step_kwargs["generator"] = generator
|
| 392 |
+
return extra_step_kwargs
|
| 393 |
+
|
| 394 |
+
def check_inputs(
|
| 395 |
+
self,
|
| 396 |
+
prompt,
|
| 397 |
+
prompt_2,
|
| 398 |
+
height,
|
| 399 |
+
width,
|
| 400 |
+
callback_steps,
|
| 401 |
+
negative_prompt=None,
|
| 402 |
+
negative_prompt_2=None,
|
| 403 |
+
prompt_embeds=None,
|
| 404 |
+
negative_prompt_embeds=None,
|
| 405 |
+
pooled_prompt_embeds=None,
|
| 406 |
+
negative_pooled_prompt_embeds=None,
|
| 407 |
+
num_images_per_prompt=None,
|
| 408 |
+
):
|
| 409 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 410 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 411 |
+
|
| 412 |
+
if (callback_steps is None) or (
|
| 413 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 414 |
+
):
|
| 415 |
+
raise ValueError(
|
| 416 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 417 |
+
f" {type(callback_steps)}."
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
if prompt is not None and prompt_embeds is not None:
|
| 421 |
+
raise ValueError(
|
| 422 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 423 |
+
" only forward one of the two."
|
| 424 |
+
)
|
| 425 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
| 426 |
+
raise ValueError(
|
| 427 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 428 |
+
" only forward one of the two."
|
| 429 |
+
)
|
| 430 |
+
elif prompt is None and prompt_embeds is None:
|
| 431 |
+
raise ValueError(
|
| 432 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 433 |
+
)
|
| 434 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 435 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 436 |
+
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
| 437 |
+
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
| 438 |
+
|
| 439 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 440 |
+
raise ValueError(
|
| 441 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 442 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 443 |
+
)
|
| 444 |
+
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
| 445 |
+
raise ValueError(
|
| 446 |
+
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
| 447 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 448 |
+
)
|
| 449 |
+
|
| 450 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 451 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 452 |
+
raise ValueError(
|
| 453 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 454 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 455 |
+
f" {negative_prompt_embeds.shape}."
|
| 456 |
+
)
|
| 457 |
+
|
| 458 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 459 |
+
raise ValueError(
|
| 460 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 461 |
+
)
|
| 462 |
+
|
| 463 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 464 |
+
raise ValueError(
|
| 465 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 466 |
+
)
|
| 467 |
+
|
| 468 |
+
# DemoFusion specific checks
|
| 469 |
+
if max(height, width) % 1024 != 0:
|
| 470 |
+
raise ValueError(
|
| 471 |
+
f"the larger one of `height` and `width` has to be divisible by 1024 but are {height} and {width}."
|
| 472 |
+
)
|
| 473 |
+
|
| 474 |
+
if num_images_per_prompt != 1:
|
| 475 |
+
warnings.warn("num_images_per_prompt != 1 is not supported by DemoFusion and will be ignored.")
|
| 476 |
+
num_images_per_prompt = 1
|
| 477 |
+
|
| 478 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 479 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 480 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 481 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 482 |
+
raise ValueError(
|
| 483 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 484 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 485 |
+
)
|
| 486 |
+
|
| 487 |
+
if latents is None:
|
| 488 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 489 |
+
else:
|
| 490 |
+
latents = latents.to(device)
|
| 491 |
+
|
| 492 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 493 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 494 |
+
return latents
|
| 495 |
+
|
| 496 |
+
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
|
| 497 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 498 |
+
|
| 499 |
+
passed_add_embed_dim = (
|
| 500 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
|
| 501 |
+
)
|
| 502 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 503 |
+
|
| 504 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
| 505 |
+
raise ValueError(
|
| 506 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 507 |
+
)
|
| 508 |
+
|
| 509 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 510 |
+
return add_time_ids
|
| 511 |
+
|
| 512 |
+
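A minimal sketch of what `_get_add_time_ids` assembles: the three size/crop tuples are flattened into six integers and wrapped into a batch-of-one tensor that is later passed to the UNet via `added_cond_kwargs["time_ids"]` (the concrete values below are illustrative assumptions):

import torch

original_size = (2048, 2048)         # assumed example values
crops_coords_top_left = (0, 0)
target_size = (2048, 2048)

add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids], dtype=torch.float16)
print(add_time_ids.shape)  # torch.Size([1, 6])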
def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
|
| 513 |
+
height //= self.vae_scale_factor
|
| 514 |
+
width //= self.vae_scale_factor
|
| 515 |
+
num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
|
| 516 |
+
num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
|
| 517 |
+
total_num_blocks = int(num_blocks_height * num_blocks_width)
|
| 518 |
+
views = []
|
| 519 |
+
for i in range(total_num_blocks):
|
| 520 |
+
h_start = int((i // num_blocks_width) * stride)
|
| 521 |
+
h_end = h_start + window_size
|
| 522 |
+
w_start = int((i % num_blocks_width) * stride)
|
| 523 |
+
w_end = w_start + window_size
|
| 524 |
+
|
| 525 |
+
if h_end > height:
|
| 526 |
+
h_start = int(h_start + height - h_end)
|
| 527 |
+
h_end = int(height)
|
| 528 |
+
if w_end > width:
|
| 529 |
+
w_start = int(w_start + width - w_end)
|
| 530 |
+
w_end = int(width)
|
| 531 |
+
if h_start < 0:
|
| 532 |
+
h_end = int(h_end - h_start)
|
| 533 |
+
h_start = 0
|
| 534 |
+
if w_start < 0:
|
| 535 |
+
w_end = int(w_end - w_start)
|
| 536 |
+
w_start = 0
|
| 537 |
+
|
| 538 |
+
if random_jitter:
|
| 539 |
+
jitter_range = (window_size - stride) // 4
|
| 540 |
+
w_jitter = 0
|
| 541 |
+
h_jitter = 0
|
| 542 |
+
if (w_start != 0) and (w_end != width):
|
| 543 |
+
w_jitter = random.randint(-jitter_range, jitter_range)
|
| 544 |
+
elif (w_start == 0) and (w_end != width):
|
| 545 |
+
w_jitter = random.randint(-jitter_range, 0)
|
| 546 |
+
elif (w_start != 0) and (w_end == width):
|
| 547 |
+
w_jitter = random.randint(0, jitter_range)
|
| 548 |
+
if (h_start != 0) and (h_end != height):
|
| 549 |
+
h_jitter = random.randint(-jitter_range, jitter_range)
|
| 550 |
+
elif (h_start == 0) and (h_end != height):
|
| 551 |
+
h_jitter = random.randint(-jitter_range, 0)
|
| 552 |
+
elif (h_start != 0) and (h_end == height):
|
| 553 |
+
h_jitter = random.randint(0, jitter_range)
|
| 554 |
+
h_start += h_jitter + jitter_range
|
| 555 |
+
h_end += h_jitter + jitter_range
|
| 556 |
+
w_start += w_jitter + jitter_range
|
| 557 |
+
w_end += w_jitter + jitter_range
|
| 558 |
+
|
| 559 |
+
views.append((h_start, h_end, w_start, w_end))
|
| 560 |
+
return views
|
| 561 |
+
|
| 562 |
+
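To make the sliding-window logic of `get_views` concrete, here is a small, hypothetical walk-through of the coordinates it yields without jitter (latent size, window size and stride are chosen purely for illustration; with `random_jitter=True` each window is additionally padded and randomly shifted):

# A 256x256 latent grid with 128-wide windows and stride 64 gives
# int((256 - 128) / 64 - 1e-6) + 2 = 3 blocks per side, i.e. 3 x 3 = 9 overlapping views.
height, width, window_size, stride = 256, 256, 128, 64
views = []
for i in range(3 * 3):
    h_start = (i // 3) * stride
    w_start = (i % 3) * stride
    views.append((h_start, h_start + window_size, w_start, w_start + window_size))
# views[0] == (0, 128, 0, 128), views[4] == (64, 192, 64, 192), views[8] == (128, 256, 128, 256)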
def tiled_decode(self, latents, current_height, current_width):
|
| 563 |
+
core_size = self.unet.config.sample_size // 4
|
| 564 |
+
core_stride = core_size
|
| 565 |
+
pad_size = self.unet.config.sample_size // 4 * 3
|
| 566 |
+
decoder_view_batch_size = 1
|
| 567 |
+
|
| 568 |
+
views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
|
| 569 |
+
views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
|
| 570 |
+
latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), "constant", 0)
|
| 571 |
+
image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
|
| 572 |
+
count = torch.zeros_like(image).to(latents.device)
|
| 573 |
+
# get the latents corresponding to the current view coordinates
|
| 574 |
+
with self.progress_bar(total=len(views_batch)) as progress_bar:
|
| 575 |
+
for j, batch_view in enumerate(views_batch):
|
| 576 |
+
len(batch_view)
|
| 577 |
+
latents_for_view = torch.cat(
|
| 578 |
+
[
|
| 579 |
+
latents_[:, :, h_start : h_end + pad_size * 2, w_start : w_end + pad_size * 2]
|
| 580 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 581 |
+
]
|
| 582 |
+
)
|
| 583 |
+
image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 584 |
+
h_start, h_end, w_start, w_end = views[j]
|
| 585 |
+
h_start, h_end, w_start, w_end = (
|
| 586 |
+
h_start * self.vae_scale_factor,
|
| 587 |
+
h_end * self.vae_scale_factor,
|
| 588 |
+
w_start * self.vae_scale_factor,
|
| 589 |
+
w_end * self.vae_scale_factor,
|
| 590 |
+
)
|
| 591 |
+
p_h_start, p_h_end, p_w_start, p_w_end = (
|
| 592 |
+
pad_size * self.vae_scale_factor,
|
| 593 |
+
image_patch.size(2) - pad_size * self.vae_scale_factor,
|
| 594 |
+
pad_size * self.vae_scale_factor,
|
| 595 |
+
image_patch.size(3) - pad_size * self.vae_scale_factor,
|
| 596 |
+
)
|
| 597 |
+
image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end]
|
| 598 |
+
count[:, :, h_start:h_end, w_start:w_end] += 1
|
| 599 |
+
progress_bar.update()
|
| 600 |
+
image = image / count
|
| 601 |
+
|
| 602 |
+
return image
|
| 603 |
+
|
| 604 |
+
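The essential pattern in `tiled_decode` is sum-and-count averaging of overlapping decoded patches; a stripped-down sketch of that accumulation (independent of the VAE, with made-up tensor sizes) looks like this:

import torch

image = torch.zeros(1, 3, 512, 512)
count = torch.zeros_like(image)
# two overlapping 256x256 patches, as if returned by the VAE decoder
patches = [((0, 256, 0, 256), torch.ones(1, 3, 256, 256)),
           ((128, 384, 128, 384), 3 * torch.ones(1, 3, 256, 256))]
for (h0, h1, w0, w1), patch in patches:
    image[:, :, h0:h1, w0:w1] += patch
    count[:, :, h0:h1, w0:w1] += 1
# avoid division by zero for pixels no patch touched
image = image / count.clamp(min=1)  # the overlap region averages to 2.0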
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
| 605 |
+
def upcast_vae(self):
|
| 606 |
+
dtype = self.vae.dtype
|
| 607 |
+
self.vae.to(dtype=torch.float32)
|
| 608 |
+
use_torch_2_0_or_xformers = isinstance(
|
| 609 |
+
self.vae.decoder.mid_block.attentions[0].processor,
|
| 610 |
+
(
|
| 611 |
+
AttnProcessor2_0,
|
| 612 |
+
XFormersAttnProcessor,
|
| 613 |
+
LoRAXFormersAttnProcessor,
|
| 614 |
+
LoRAAttnProcessor2_0,
|
| 615 |
+
),
|
| 616 |
+
)
|
| 617 |
+
# if xformers or torch_2_0 is used attention block does not need
|
| 618 |
+
# to be in float32 which can save lots of memory
|
| 619 |
+
if use_torch_2_0_or_xformers:
|
| 620 |
+
self.vae.post_quant_conv.to(dtype)
|
| 621 |
+
self.vae.decoder.conv_in.to(dtype)
|
| 622 |
+
self.vae.decoder.mid_block.to(dtype)
|
| 623 |
+
|
| 624 |
+
@torch.no_grad()
|
| 625 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 626 |
+
def __call__(
|
| 627 |
+
self,
|
| 628 |
+
prompt: Union[str, List[str]] = None,
|
| 629 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 630 |
+
height: Optional[int] = None,
|
| 631 |
+
width: Optional[int] = None,
|
| 632 |
+
num_inference_steps: int = 50,
|
| 633 |
+
denoising_end: Optional[float] = None,
|
| 634 |
+
guidance_scale: float = 5.0,
|
| 635 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 636 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 637 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 638 |
+
eta: float = 0.0,
|
| 639 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 640 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 641 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 642 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 643 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 644 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 645 |
+
output_type: Optional[str] = "pil",
|
| 646 |
+
return_dict: bool = False,
|
| 647 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 648 |
+
callback_steps: int = 1,
|
| 649 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 650 |
+
guidance_rescale: float = 0.0,
|
| 651 |
+
original_size: Optional[Tuple[int, int]] = None,
|
| 652 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 653 |
+
target_size: Optional[Tuple[int, int]] = None,
|
| 654 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 655 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 656 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 657 |
+
################### DemoFusion specific parameters ####################
|
| 658 |
+
view_batch_size: int = 16,
|
| 659 |
+
multi_decoder: bool = True,
|
| 660 |
+
stride: Optional[int] = 64,
|
| 661 |
+
cosine_scale_1: Optional[float] = 3.0,
|
| 662 |
+
cosine_scale_2: Optional[float] = 1.0,
|
| 663 |
+
cosine_scale_3: Optional[float] = 1.0,
|
| 664 |
+
sigma: Optional[float] = 0.8,
|
| 665 |
+
show_image: bool = False,
|
| 666 |
+
):
|
| 667 |
+
r"""
|
| 668 |
+
Function invoked when calling the pipeline for generation.
|
| 669 |
+
|
| 670 |
+
Args:
|
| 671 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 672 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
| 673 |
+
instead.
|
| 674 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 675 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 676 |
+
used in both text-encoders
|
| 677 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 678 |
+
The height in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 679 |
+
Anything below 512 pixels won't work well for
|
| 680 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 681 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 682 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 683 |
+
The width in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 684 |
+
Anything below 512 pixels won't work well for
|
| 685 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 686 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 687 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 688 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 689 |
+
expense of slower inference.
|
| 690 |
+
denoising_end (`float`, *optional*):
|
| 691 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 692 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 693 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
| 694 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
| 695 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 696 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
|
| 697 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 698 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 699 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 700 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 701 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 702 |
+
usually at the expense of lower image quality.
|
| 703 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 704 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 705 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 706 |
+
less than `1`).
|
| 707 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 708 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 709 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 710 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 711 |
+
The number of images to generate per prompt.
|
| 712 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 713 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 714 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 715 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 716 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 717 |
+
to make generation deterministic.
|
| 718 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 719 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 720 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 721 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 722 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 723 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 724 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 725 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 726 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 727 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 728 |
+
argument.
|
| 729 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 730 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 731 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 732 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 733 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 734 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 735 |
+
input argument.
|
| 736 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 737 |
+
The output format of the generated image. Choose between
|
| 738 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 739 |
+
return_dict (`bool`, *optional*, defaults to `False`):
|
| 740 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
| 741 |
+
of a plain tuple.
|
| 742 |
+
callback (`Callable`, *optional*):
|
| 743 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 744 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 745 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 746 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 747 |
+
called at every step.
|
| 748 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 749 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 750 |
+
`self.processor` in
|
| 751 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 752 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 753 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 754 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
|
| 755 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
| 756 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 757 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 758 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 759 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 760 |
+
explained in section 2.2 of
|
| 761 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 762 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 763 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 764 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 765 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 766 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 767 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 768 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 769 |
+
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 770 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 771 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 772 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 773 |
+
micro-conditioning as explained in section 2.2 of
|
| 774 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 775 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 776 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 777 |
+
To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
|
| 778 |
+
micro-conditioning as explained in section 2.2 of
|
| 779 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 780 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 781 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 782 |
+
To negatively condition the generation process based on a target image resolution. It should be the same
|
| 783 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 784 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 785 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 786 |
+
################### DemoFusion specific parameters ####################
|
| 787 |
+
view_batch_size (`int`, defaults to 16):
|
| 788 |
+
The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
|
| 789 |
+
efficiency but comes with increased GPU memory requirements.
|
| 790 |
+
multi_decoder (`bool`, defaults to True):
|
| 791 |
+
Determines whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
|
| 792 |
+
a tiled decoder becomes necessary.
|
| 793 |
+
stride (`int`, defaults to 64):
|
| 794 |
+
The stride of moving local patches. A smaller stride is better for alleviating seam issues,
|
| 795 |
+
but it also introduces additional computational overhead and inference time.
|
| 796 |
+
cosine_scale_1 (`float`, defaults to 3):
|
| 797 |
+
Controls the strength of the skip-residual. For its specific impact, please refer to Appendix C
|
| 798 |
+
in the DemoFusion paper.
|
| 799 |
+
cosine_scale_2 (`float`, defaults to 1):
|
| 800 |
+
Controls the strength of dilated sampling. For its specific impact, please refer to Appendix C
|
| 801 |
+
in the DemoFusion paper.
|
| 802 |
+
cosine_scale_3 (`float`, defaults to 1):
|
| 803 |
+
Controls the strength of the Gaussian filter. For its specific impact, please refer to Appendix C
|
| 804 |
+
in the DemoFusion paper.
|
| 805 |
+
sigma (`float`, defaults to 0.8):
|
| 806 |
+
The standard deviation of the Gaussian filter.
|
| 807 |
+
show_image (`bool`, defaults to False):
|
| 808 |
+
Determines whether to show intermediate results during generation.
|
| 809 |
+
|
| 810 |
+
Examples:
|
| 811 |
+
|
| 812 |
+
Returns:
|
| 813 |
+
A `list` with the generated images from each phase.
|
| 814 |
+
"""
|
| 815 |
+
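As a rough usage sketch for the DemoFusion-specific arguments documented above (the checkpoint id and the `custom_pipeline` name are assumptions for illustration, not read from this file):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="pipeline_demofusion_sdxl",  # assumed community-pipeline name
    torch_dtype=torch.float16,
).to("cuda")

images = pipe(
    prompt="a photo of a cat",
    height=3072,
    width=3072,
    view_batch_size=16,
    multi_decoder=True,    # tiled VAE decoding, useful at >= 3072x3072
    stride=64,
    cosine_scale_1=3.0,
    cosine_scale_2=1.0,
    cosine_scale_3=1.0,
    sigma=0.8,
    show_image=False,
)
# `images` is a list with one image per phase; the last entry is the highest-resolution result.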
|
| 816 |
+
# 0. Default height and width to unet
|
| 817 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 818 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 819 |
+
|
| 820 |
+
x1_size = self.default_sample_size * self.vae_scale_factor
|
| 821 |
+
|
| 822 |
+
height_scale = height / x1_size
|
| 823 |
+
width_scale = width / x1_size
|
| 824 |
+
scale_num = int(max(height_scale, width_scale))
|
| 825 |
+
aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
|
| 826 |
+
|
| 827 |
+
original_size = original_size or (height, width)
|
| 828 |
+
target_size = target_size or (height, width)
|
| 829 |
+
|
| 830 |
+
# 1. Check inputs. Raise error if not correct
|
| 831 |
+
self.check_inputs(
|
| 832 |
+
prompt,
|
| 833 |
+
prompt_2,
|
| 834 |
+
height,
|
| 835 |
+
width,
|
| 836 |
+
callback_steps,
|
| 837 |
+
negative_prompt,
|
| 838 |
+
negative_prompt_2,
|
| 839 |
+
prompt_embeds,
|
| 840 |
+
negative_prompt_embeds,
|
| 841 |
+
pooled_prompt_embeds,
|
| 842 |
+
negative_pooled_prompt_embeds,
|
| 843 |
+
num_images_per_prompt,
|
| 844 |
+
)
|
| 845 |
+
|
| 846 |
+
# 2. Define call parameters
|
| 847 |
+
if prompt is not None and isinstance(prompt, str):
|
| 848 |
+
batch_size = 1
|
| 849 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 850 |
+
batch_size = len(prompt)
|
| 851 |
+
else:
|
| 852 |
+
batch_size = prompt_embeds.shape[0]
|
| 853 |
+
|
| 854 |
+
device = self._execution_device
|
| 855 |
+
|
| 856 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 857 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 858 |
+
# corresponds to doing no classifier free guidance.
|
| 859 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 860 |
+
|
| 861 |
+
# 3. Encode input prompt
|
| 862 |
+
text_encoder_lora_scale = (
|
| 863 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 864 |
+
)
|
| 865 |
+
(
|
| 866 |
+
prompt_embeds,
|
| 867 |
+
negative_prompt_embeds,
|
| 868 |
+
pooled_prompt_embeds,
|
| 869 |
+
negative_pooled_prompt_embeds,
|
| 870 |
+
) = self.encode_prompt(
|
| 871 |
+
prompt=prompt,
|
| 872 |
+
prompt_2=prompt_2,
|
| 873 |
+
device=device,
|
| 874 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 875 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 876 |
+
negative_prompt=negative_prompt,
|
| 877 |
+
negative_prompt_2=negative_prompt_2,
|
| 878 |
+
prompt_embeds=prompt_embeds,
|
| 879 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 880 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 881 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 882 |
+
lora_scale=text_encoder_lora_scale,
|
| 883 |
+
)
|
| 884 |
+
|
| 885 |
+
# 4. Prepare timesteps
|
| 886 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 887 |
+
|
| 888 |
+
timesteps = self.scheduler.timesteps
|
| 889 |
+
|
| 890 |
+
# 5. Prepare latent variables
|
| 891 |
+
num_channels_latents = self.unet.config.in_channels
|
| 892 |
+
latents = self.prepare_latents(
|
| 893 |
+
batch_size * num_images_per_prompt,
|
| 894 |
+
num_channels_latents,
|
| 895 |
+
height // scale_num,
|
| 896 |
+
width // scale_num,
|
| 897 |
+
prompt_embeds.dtype,
|
| 898 |
+
device,
|
| 899 |
+
generator,
|
| 900 |
+
latents,
|
| 901 |
+
)
|
| 902 |
+
|
| 903 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 904 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 905 |
+
|
| 906 |
+
# 7. Prepare added time ids & embeddings
|
| 907 |
+
add_text_embeds = pooled_prompt_embeds
|
| 908 |
+
add_time_ids = self._get_add_time_ids(
|
| 909 |
+
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
|
| 910 |
+
)
|
| 911 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 912 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 913 |
+
negative_original_size,
|
| 914 |
+
negative_crops_coords_top_left,
|
| 915 |
+
negative_target_size,
|
| 916 |
+
dtype=prompt_embeds.dtype,
|
| 917 |
+
)
|
| 918 |
+
else:
|
| 919 |
+
negative_add_time_ids = add_time_ids
|
| 920 |
+
|
| 921 |
+
if do_classifier_free_guidance:
|
| 922 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 923 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 924 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 925 |
+
|
| 926 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 927 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 928 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 929 |
+
|
| 930 |
+
# 8. Denoising loop
|
| 931 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 932 |
+
|
| 933 |
+
# 7.1 Apply denoising_end
|
| 934 |
+
if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
|
| 935 |
+
discrete_timestep_cutoff = int(
|
| 936 |
+
round(
|
| 937 |
+
self.scheduler.config.num_train_timesteps
|
| 938 |
+
- (denoising_end * self.scheduler.config.num_train_timesteps)
|
| 939 |
+
)
|
| 940 |
+
)
|
| 941 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 942 |
+
timesteps = timesteps[:num_inference_steps]
|
| 943 |
+
|
| 944 |
+
output_images = []
|
| 945 |
+
|
| 946 |
+
############################################################### Phase 1 #################################################################
|
| 947 |
+
|
| 948 |
+
print("### Phase 1 Denoising ###")
|
| 949 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 950 |
+
for i, t in enumerate(timesteps):
|
| 951 |
+
latents_for_view = latents
|
| 952 |
+
|
| 953 |
+
# expand the latents if we are doing classifier free guidance
|
| 954 |
+
latent_model_input = latents.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latents
|
| 955 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 956 |
+
|
| 957 |
+
# predict the noise residual
|
| 958 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 959 |
+
noise_pred = self.unet(
|
| 960 |
+
latent_model_input,
|
| 961 |
+
t,
|
| 962 |
+
encoder_hidden_states=prompt_embeds,
|
| 963 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 964 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 965 |
+
return_dict=False,
|
| 966 |
+
)[0]
|
| 967 |
+
|
| 968 |
+
# perform guidance
|
| 969 |
+
if do_classifier_free_guidance:
|
| 970 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 971 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 972 |
+
|
| 973 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 974 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 975 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
|
| 976 |
+
|
| 977 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 978 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 979 |
+
|
| 980 |
+
# call the callback, if provided
|
| 981 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 982 |
+
progress_bar.update()
|
| 983 |
+
if callback is not None and i % callback_steps == 0:
|
| 984 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 985 |
+
callback(step_idx, t, latents)
|
| 986 |
+
|
| 987 |
+
anchor_mean = latents.mean()
|
| 988 |
+
anchor_std = latents.std()
|
| 989 |
+
if not output_type == "latent":
|
| 990 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 991 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 992 |
+
|
| 993 |
+
if needs_upcasting:
|
| 994 |
+
self.upcast_vae()
|
| 995 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 996 |
+
print("### Phase 1 Decoding ###")
|
| 997 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 998 |
+
# cast back to fp16 if needed
|
| 999 |
+
if needs_upcasting:
|
| 1000 |
+
self.vae.to(dtype=torch.float16)
|
| 1001 |
+
|
| 1002 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1003 |
+
if show_image:
|
| 1004 |
+
plt.figure(figsize=(10, 10))
|
| 1005 |
+
plt.imshow(image[0])
|
| 1006 |
+
plt.axis("off") # Turn off axis numbers and ticks
|
| 1007 |
+
plt.show()
|
| 1008 |
+
output_images.append(image[0])
|
| 1009 |
+
|
| 1010 |
+
####################################################### Phase 2+ #####################################################
|
| 1011 |
+
|
| 1012 |
+
for current_scale_num in range(2, scale_num + 1):
|
| 1013 |
+
print("### Phase {} Denoising ###".format(current_scale_num))
|
| 1014 |
+
current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1015 |
+
current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1016 |
+
if height > width:
|
| 1017 |
+
current_width = int(current_width * aspect_ratio)
|
| 1018 |
+
else:
|
| 1019 |
+
current_height = int(current_height * aspect_ratio)
|
| 1020 |
+
|
| 1021 |
+
latents = F.interpolate(
|
| 1022 |
+
latents,
|
| 1023 |
+
size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)),
|
| 1024 |
+
mode="bicubic",
|
| 1025 |
+
)
|
| 1026 |
+
|
| 1027 |
+
noise_latents = []
|
| 1028 |
+
noise = torch.randn_like(latents)
|
| 1029 |
+
for timestep in timesteps:
|
| 1030 |
+
noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
|
| 1031 |
+
noise_latents.append(noise_latent)
|
| 1032 |
+
latents = noise_latents[0]
|
| 1033 |
+
|
| 1034 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1035 |
+
for i, t in enumerate(timesteps):
|
| 1036 |
+
count = torch.zeros_like(latents)
|
| 1037 |
+
value = torch.zeros_like(latents)
|
| 1038 |
+
cosine_factor = (
|
| 1039 |
+
0.5
|
| 1040 |
+
* (
|
| 1041 |
+
1
|
| 1042 |
+
+ torch.cos(
|
| 1043 |
+
torch.pi
|
| 1044 |
+
* (self.scheduler.config.num_train_timesteps - t)
|
| 1045 |
+
/ self.scheduler.config.num_train_timesteps
|
| 1046 |
+
)
|
| 1047 |
+
).cpu()
|
| 1048 |
+
)
|
| 1049 |
+
|
| 1050 |
+
c1 = cosine_factor**cosine_scale_1
|
| 1051 |
+
latents = latents * (1 - c1) + noise_latents[i] * c1
|
| 1052 |
+
|
| 1053 |
+
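# The skip-residual blend above follows a cosine schedule: with T = num_train_timesteps,
# early steps (t close to T) give cosine_factor = 0.5 * (1 + cos(pi * (T - t) / T)) close
# to 1, so c1 is close to 1 and the latents are mostly replaced by the re-noised upsampled
# latents; late steps (t close to 0) give cosine_factor close to 0 and c1 close to 0,
# leaving the denoised latents untouched. For example, with T = 1000 and cosine_scale_1 = 3:
#   t = 900 -> cosine_factor ~ 0.976, c1 ~ 0.93
#   t = 500 -> cosine_factor = 0.5,   c1 = 0.125
#   t = 100 -> cosine_factor ~ 0.024, c1 ~ 1.5e-5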
############################################# MultiDiffusion #############################################
|
| 1054 |
+
|
| 1055 |
+
views = self.get_views(
|
| 1056 |
+
current_height,
|
| 1057 |
+
current_width,
|
| 1058 |
+
stride=stride,
|
| 1059 |
+
window_size=self.unet.config.sample_size,
|
| 1060 |
+
random_jitter=True,
|
| 1061 |
+
)
|
| 1062 |
+
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
|
| 1063 |
+
|
| 1064 |
+
jitter_range = (self.unet.config.sample_size - stride) // 4
|
| 1065 |
+
latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), "constant", 0)
|
| 1066 |
+
|
| 1067 |
+
count_local = torch.zeros_like(latents_)
|
| 1068 |
+
value_local = torch.zeros_like(latents_)
|
| 1069 |
+
|
| 1070 |
+
for j, batch_view in enumerate(views_batch):
|
| 1071 |
+
vb_size = len(batch_view)
|
| 1072 |
+
|
| 1073 |
+
# get the latents corresponding to the current view coordinates
|
| 1074 |
+
latents_for_view = torch.cat(
|
| 1075 |
+
[
|
| 1076 |
+
latents_[:, :, h_start:h_end, w_start:w_end]
|
| 1077 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 1078 |
+
]
|
| 1079 |
+
)
|
| 1080 |
+
|
| 1081 |
+
# expand the latents if we are doing classifier free guidance
|
| 1082 |
+
latent_model_input = latents_for_view
|
| 1083 |
+
latent_model_input = (
|
| 1084 |
+
latent_model_input.repeat_interleave(2, dim=0)
|
| 1085 |
+
if do_classifier_free_guidance
|
| 1086 |
+
else latent_model_input
|
| 1087 |
+
)
|
| 1088 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1089 |
+
|
| 1090 |
+
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
|
| 1091 |
+
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
|
| 1092 |
+
add_time_ids_input = []
|
| 1093 |
+
for h_start, h_end, w_start, w_end in batch_view:
|
| 1094 |
+
add_time_ids_ = add_time_ids.clone()
|
| 1095 |
+
add_time_ids_[:, 2] = h_start * self.vae_scale_factor
|
| 1096 |
+
add_time_ids_[:, 3] = w_start * self.vae_scale_factor
|
| 1097 |
+
add_time_ids_input.append(add_time_ids_)
|
| 1098 |
+
add_time_ids_input = torch.cat(add_time_ids_input)
|
| 1099 |
+
|
| 1100 |
+
# predict the noise residual
|
| 1101 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
|
| 1102 |
+
noise_pred = self.unet(
|
| 1103 |
+
latent_model_input,
|
| 1104 |
+
t,
|
| 1105 |
+
encoder_hidden_states=prompt_embeds_input,
|
| 1106 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1107 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1108 |
+
return_dict=False,
|
| 1109 |
+
)[0]
|
| 1110 |
+
|
| 1111 |
+
if do_classifier_free_guidance:
|
| 1112 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1113 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1114 |
+
|
| 1115 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1116 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1117 |
+
noise_pred = rescale_noise_cfg(
|
| 1118 |
+
noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
|
| 1119 |
+
)
|
| 1120 |
+
|
| 1121 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1122 |
+
self.scheduler._init_step_index(t)
|
| 1123 |
+
latents_denoised_batch = self.scheduler.step(
|
| 1124 |
+
noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
|
| 1125 |
+
)[0]
|
| 1126 |
+
|
| 1127 |
+
# extract value from batch
|
| 1128 |
+
for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
|
| 1129 |
+
latents_denoised_batch.chunk(vb_size), batch_view
|
| 1130 |
+
):
|
| 1131 |
+
value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
|
| 1132 |
+
count_local[:, :, h_start:h_end, w_start:w_end] += 1
|
| 1133 |
+
|
| 1134 |
+
value_local = value_local[
|
| 1135 |
+
:,
|
| 1136 |
+
:,
|
| 1137 |
+
jitter_range : jitter_range + current_height // self.vae_scale_factor,
|
| 1138 |
+
jitter_range : jitter_range + current_width // self.vae_scale_factor,
|
| 1139 |
+
]
|
| 1140 |
+
count_local = count_local[
|
| 1141 |
+
:,
|
| 1142 |
+
:,
|
| 1143 |
+
jitter_range : jitter_range + current_height // self.vae_scale_factor,
|
| 1144 |
+
jitter_range : jitter_range + current_width // self.vae_scale_factor,
|
| 1145 |
+
]
|
| 1146 |
+
|
| 1147 |
+
c2 = cosine_factor**cosine_scale_2
|
| 1148 |
+
|
| 1149 |
+
value += value_local / count_local * (1 - c2)
|
| 1150 |
+
count += torch.ones_like(value_local) * (1 - c2)
|
| 1151 |
+
|
| 1152 |
+
############################################# Dilated Sampling #############################################
|
| 1153 |
+
|
| 1154 |
+
views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)]
|
| 1155 |
+
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
|
| 1156 |
+
|
| 1157 |
+
h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
|
| 1158 |
+
w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
|
| 1159 |
+
latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), "constant", 0)
|
| 1160 |
+
|
| 1161 |
+
count_global = torch.zeros_like(latents_)
|
| 1162 |
+
value_global = torch.zeros_like(latents_)
|
| 1163 |
+
|
| 1164 |
+
c3 = 0.99 * cosine_factor**cosine_scale_3 + 1e-2
|
| 1165 |
+
std_, mean_ = latents_.std(), latents_.mean()
|
| 1166 |
+
latents_gaussian = gaussian_filter(
|
| 1167 |
+
latents_, kernel_size=(2 * current_scale_num - 1), sigma=sigma * c3
|
| 1168 |
+
)
|
| 1169 |
+
latents_gaussian = (
|
| 1170 |
+
latents_gaussian - latents_gaussian.mean()
|
| 1171 |
+
) / latents_gaussian.std() * std_ + mean_
|
| 1172 |
+
|
| 1173 |
+
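# Dilated sampling: each (h, w) view below selects a strided sub-grid of the padded
# latents, latents_[:, :, h::current_scale_num, w::current_scale_num]. For
# current_scale_num = 2 this splits a 2N x 2N latent map into four interleaved N x N
# grids (offsets (0,0), (0,1), (1,0), (1,1)); each sub-grid is denoised at the UNet's
# native resolution and the results are scattered back, which keeps the global layout
# consistent at the enlarged resolution.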
for j, batch_view in enumerate(views_batch):
|
| 1174 |
+
latents_for_view = torch.cat(
|
| 1175 |
+
[latents_[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
|
| 1176 |
+
)
|
| 1177 |
+
latents_for_view_gaussian = torch.cat(
|
| 1178 |
+
[latents_gaussian[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
|
| 1179 |
+
)
|
| 1180 |
+
|
| 1181 |
+
vb_size = latents_for_view.size(0)
|
| 1182 |
+
|
| 1183 |
+
# expand the latents if we are doing classifier free guidance
|
| 1184 |
+
latent_model_input = latents_for_view_gaussian
|
| 1185 |
+
latent_model_input = (
|
| 1186 |
+
latent_model_input.repeat_interleave(2, dim=0)
|
| 1187 |
+
if do_classifier_free_guidance
|
| 1188 |
+
else latent_model_input
|
| 1189 |
+
)
|
| 1190 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1191 |
+
|
| 1192 |
+
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
|
| 1193 |
+
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
|
| 1194 |
+
add_time_ids_input = torch.cat([add_time_ids] * vb_size)
|
| 1195 |
+
|
| 1196 |
+
# predict the noise residual
|
| 1197 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
|
| 1198 |
+
noise_pred = self.unet(
|
| 1199 |
+
latent_model_input,
|
| 1200 |
+
t,
|
| 1201 |
+
encoder_hidden_states=prompt_embeds_input,
|
| 1202 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1203 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1204 |
+
return_dict=False,
|
| 1205 |
+
)[0]
|
| 1206 |
+
|
| 1207 |
+
if do_classifier_free_guidance:
|
| 1208 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1209 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1210 |
+
|
| 1211 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1212 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1213 |
+
noise_pred = rescale_noise_cfg(
|
| 1214 |
+
noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
|
| 1215 |
+
)
|
| 1216 |
+
|
| 1217 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1218 |
+
self.scheduler._init_step_index(t)
|
| 1219 |
+
latents_denoised_batch = self.scheduler.step(
|
| 1220 |
+
noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
|
| 1221 |
+
)[0]
|
| 1222 |
+
|
| 1223 |
+
# extract value from batch
|
| 1224 |
+
for latents_view_denoised, (h, w) in zip(latents_denoised_batch.chunk(vb_size), batch_view):
|
| 1225 |
+
value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
|
| 1226 |
+
count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
|
| 1227 |
+
|
| 1228 |
+
c2 = cosine_factor**cosine_scale_2
|
| 1229 |
+
|
| 1230 |
+
value_global = value_global[:, :, h_pad:, w_pad:]
|
| 1231 |
+
|
| 1232 |
+
value += value_global * c2
|
| 1233 |
+
count += torch.ones_like(value_global) * c2
|
| 1234 |
+
|
| 1235 |
+
###########################################################
|
| 1236 |
+
|
| 1237 |
+
latents = torch.where(count > 0, value / count, value)
|
| 1238 |
+
|
| 1239 |
+
# call the callback, if provided
|
| 1240 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1241 |
+
progress_bar.update()
|
| 1242 |
+
if callback is not None and i % callback_steps == 0:
|
| 1243 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1244 |
+
callback(step_idx, t, latents)
|
| 1245 |
+
|
| 1246 |
+
#########################################################################################################################################
|
| 1247 |
+
|
| 1248 |
+
latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
|
| 1249 |
+
if not output_type == "latent":
|
| 1250 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1251 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1252 |
+
|
| 1253 |
+
if needs_upcasting:
|
| 1254 |
+
self.upcast_vae()
|
| 1255 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1256 |
+
|
| 1257 |
+
print("### Phase {} Decoding ###".format(current_scale_num))
|
| 1258 |
+
if multi_decoder:
|
| 1259 |
+
image = self.tiled_decode(latents, current_height, current_width)
|
| 1260 |
+
else:
|
| 1261 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1262 |
+
|
| 1263 |
+
# cast back to fp16 if needed
|
| 1264 |
+
if needs_upcasting:
|
| 1265 |
+
self.vae.to(dtype=torch.float16)
|
| 1266 |
+
else:
|
| 1267 |
+
image = latents
|
| 1268 |
+
|
| 1269 |
+
if not output_type == "latent":
|
| 1270 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1271 |
+
if show_image:
|
| 1272 |
+
plt.figure(figsize=(10, 10))
|
| 1273 |
+
plt.imshow(image[0])
|
| 1274 |
+
plt.axis("off") # Turn off axis numbers and ticks
|
| 1275 |
+
plt.show()
|
| 1276 |
+
output_images.append(image[0])
|
| 1277 |
+
|
| 1278 |
+
# Offload all models
|
| 1279 |
+
self.maybe_free_model_hooks()
|
| 1280 |
+
|
| 1281 |
+
return output_images
|
| 1282 |
+
|
| 1283 |
+
# Override to properly handle the loading and unloading of the additional text encoder.
|
| 1284 |
+
def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
|
| 1285 |
+
# We could have accessed the unet config from `lora_state_dict()` too. We pass
|
| 1286 |
+
# it here explicitly to be able to tell that it's coming from an SDXL
|
| 1287 |
+
# pipeline.
|
| 1288 |
+
|
| 1289 |
+
# Remove any existing hooks.
|
| 1290 |
+
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
| 1291 |
+
from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
|
| 1292 |
+
else:
|
| 1293 |
+
raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
|
| 1294 |
+
|
| 1295 |
+
is_model_cpu_offload = False
|
| 1296 |
+
is_sequential_cpu_offload = False
|
| 1297 |
+
recursive = False
|
| 1298 |
+
for _, component in self.components.items():
|
| 1299 |
+
if isinstance(component, torch.nn.Module):
|
| 1300 |
+
if hasattr(component, "_hf_hook"):
|
| 1301 |
+
is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
|
| 1302 |
+
is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
|
| 1303 |
+
logger.info(
|
| 1304 |
+
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
|
| 1305 |
+
)
|
| 1306 |
+
recursive = is_sequential_cpu_offload
|
| 1307 |
+
remove_hook_from_module(component, recurse=recursive)
|
| 1308 |
+
state_dict, network_alphas = self.lora_state_dict(
|
| 1309 |
+
pretrained_model_name_or_path_or_dict,
|
| 1310 |
+
unet_config=self.unet.config,
|
| 1311 |
+
**kwargs,
|
| 1312 |
+
)
|
| 1313 |
+
self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
|
| 1314 |
+
|
| 1315 |
+
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
|
| 1316 |
+
if len(text_encoder_state_dict) > 0:
|
| 1317 |
+
self.load_lora_into_text_encoder(
|
| 1318 |
+
text_encoder_state_dict,
|
| 1319 |
+
network_alphas=network_alphas,
|
| 1320 |
+
text_encoder=self.text_encoder,
|
| 1321 |
+
prefix="text_encoder",
|
| 1322 |
+
lora_scale=self.lora_scale,
|
| 1323 |
+
)
|
| 1324 |
+
|
| 1325 |
+
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
|
| 1326 |
+
if len(text_encoder_2_state_dict) > 0:
|
| 1327 |
+
self.load_lora_into_text_encoder(
|
| 1328 |
+
text_encoder_2_state_dict,
|
| 1329 |
+
network_alphas=network_alphas,
|
| 1330 |
+
text_encoder=self.text_encoder_2,
|
| 1331 |
+
prefix="text_encoder_2",
|
| 1332 |
+
lora_scale=self.lora_scale,
|
| 1333 |
+
)
|
| 1334 |
+
|
| 1335 |
+
# Offload back.
|
| 1336 |
+
if is_model_cpu_offload:
|
| 1337 |
+
self.enable_model_cpu_offload()
|
| 1338 |
+
elif is_sequential_cpu_offload:
|
| 1339 |
+
self.enable_sequential_cpu_offload()
|
| 1340 |
+
|
| 1341 |
+
@classmethod
|
| 1342 |
+
def save_lora_weights(
|
| 1343 |
+
self,
|
| 1344 |
+
save_directory: Union[str, os.PathLike],
|
| 1345 |
+
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1346 |
+
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1347 |
+
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1348 |
+
is_main_process: bool = True,
|
| 1349 |
+
weight_name: str = None,
|
| 1350 |
+
save_function: Callable = None,
|
| 1351 |
+
safe_serialization: bool = True,
|
| 1352 |
+
):
|
| 1353 |
+
state_dict = {}
|
| 1354 |
+
|
| 1355 |
+
def pack_weights(layers, prefix):
|
| 1356 |
+
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
|
| 1357 |
+
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
|
| 1358 |
+
return layers_state_dict
|
| 1359 |
+
|
| 1360 |
+
if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
|
| 1361 |
+
raise ValueError(
|
| 1362 |
+
"You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
|
| 1363 |
+
)
|
| 1364 |
+
|
| 1365 |
+
if unet_lora_layers:
|
| 1366 |
+
state_dict.update(pack_weights(unet_lora_layers, "unet"))
|
| 1367 |
+
|
| 1368 |
+
if text_encoder_lora_layers and text_encoder_2_lora_layers:
|
| 1369 |
+
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
|
| 1370 |
+
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
|
| 1371 |
+
|
| 1372 |
+
self.write_lora_layers(
|
| 1373 |
+
state_dict=state_dict,
|
| 1374 |
+
save_directory=save_directory,
|
| 1375 |
+
is_main_process=is_main_process,
|
| 1376 |
+
weight_name=weight_name,
|
| 1377 |
+
save_function=save_function,
|
| 1378 |
+
safe_serialization=safe_serialization,
|
| 1379 |
+
)
|
| 1380 |
+
|
| 1381 |
+
def _remove_text_encoder_monkey_patch(self):
|
| 1382 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
|
| 1383 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
|
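As a reference for the `save_lora_weights` override above: the `pack_weights` helper only namespaces each LoRA state-dict key with the component it belongs to before the combined dict is written out. A minimal standalone sketch of that key layout follows; the layer names and tensor shapes are invented purely for illustration.

import torch

def pack_weights(layers, prefix):
    # Accept either an nn.Module or a plain state dict and prefix every key.
    weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
    return {f"{prefix}.{name}": tensor for name, tensor in weights.items()}

# Hypothetical LoRA tensors, only to show the resulting key layout.
unet_lora = {"down_blocks.0.attentions.0.to_q.lora_A.weight": torch.zeros(4, 320)}
te_lora = {"text_model.encoder.layers.0.q_proj.lora_A.weight": torch.zeros(4, 768)}

state_dict = {}
state_dict.update(pack_weights(unet_lora, "unet"))
state_dict.update(pack_weights(te_lora, "text_encoder"))
print(sorted(state_dict))
# ['text_encoder.text_model.encoder.layers.0.q_proj.lora_A.weight',
#  'unet.down_blocks.0.attentions.0.to_q.lora_A.weight']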
v0.27.0/pipeline_fabric.py
ADDED
|
@@ -0,0 +1,751 @@
|
| 1 |
+
# Copyright 2024 FABRIC authors and the HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import List, Optional, Union
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
from packaging import version
|
| 18 |
+
from PIL import Image
|
| 19 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 20 |
+
|
| 21 |
+
from diffusers import AutoencoderKL, UNet2DConditionModel
|
| 22 |
+
from diffusers.configuration_utils import FrozenDict
|
| 23 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 24 |
+
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
|
| 25 |
+
from diffusers.models.attention import BasicTransformerBlock
|
| 26 |
+
from diffusers.models.attention_processor import LoRAAttnProcessor
|
| 27 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 28 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 29 |
+
from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers
|
| 30 |
+
from diffusers.utils import (
|
| 31 |
+
deprecate,
|
| 32 |
+
logging,
|
| 33 |
+
replace_example_docstring,
|
| 34 |
+
)
|
| 35 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 39 |
+
|
| 40 |
+
EXAMPLE_DOC_STRING = """
|
| 41 |
+
Examples:
|
| 42 |
+
```py
|
| 43 |
+
>>> from diffusers import DiffusionPipeline
|
| 44 |
+
>>> import torch
|
| 45 |
+
|
| 46 |
+
>>> model_id = "dreamlike-art/dreamlike-photoreal-2.0"
|
| 47 |
+
>>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric")
|
| 48 |
+
>>> pipe = pipe.to("cuda")
|
| 49 |
+
>>> prompt = "a giant standing in a fantasy landscape best quality"
|
| 50 |
+
>>> liked = [] # list of images for positive feedback
|
| 51 |
+
>>> disliked = [] # list of images for negative feedback
|
| 52 |
+
>>> image = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images[0]
|
| 53 |
+
```
|
| 54 |
+
"""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class FabricCrossAttnProcessor:
|
| 58 |
+
def __init__(self):
|
| 59 |
+
self.attention_probs = None
|
| 60 |
+
|
| 61 |
+
def __call__(
|
| 62 |
+
self,
|
| 63 |
+
attn,
|
| 64 |
+
hidden_states,
|
| 65 |
+
encoder_hidden_states=None,
|
| 66 |
+
attention_mask=None,
|
| 67 |
+
weights=None,
|
| 68 |
+
lora_scale=1.0,
|
| 69 |
+
):
|
| 70 |
+
batch_size, sequence_length, _ = (
|
| 71 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 72 |
+
)
|
| 73 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 74 |
+
|
| 75 |
+
if isinstance(attn.processor, LoRAAttnProcessor):
|
| 76 |
+
query = attn.to_q(hidden_states) + lora_scale * attn.processor.to_q_lora(hidden_states)
|
| 77 |
+
else:
|
| 78 |
+
query = attn.to_q(hidden_states)
|
| 79 |
+
|
| 80 |
+
if encoder_hidden_states is None:
|
| 81 |
+
encoder_hidden_states = hidden_states
|
| 82 |
+
elif attn.norm_cross:
|
| 83 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 84 |
+
|
| 85 |
+
if isinstance(attn.processor, LoRAAttnProcessor):
|
| 86 |
+
key = attn.to_k(encoder_hidden_states) + lora_scale * attn.processor.to_k_lora(encoder_hidden_states)
|
| 87 |
+
value = attn.to_v(encoder_hidden_states) + lora_scale * attn.processor.to_v_lora(encoder_hidden_states)
|
| 88 |
+
else:
|
| 89 |
+
key = attn.to_k(encoder_hidden_states)
|
| 90 |
+
value = attn.to_v(encoder_hidden_states)
|
| 91 |
+
|
| 92 |
+
query = attn.head_to_batch_dim(query)
|
| 93 |
+
key = attn.head_to_batch_dim(key)
|
| 94 |
+
value = attn.head_to_batch_dim(value)
|
| 95 |
+
|
| 96 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
| 97 |
+
|
| 98 |
+
if weights is not None:
|
| 99 |
+
if weights.shape[0] != 1:
|
| 100 |
+
weights = weights.repeat_interleave(attn.heads, dim=0)
|
| 101 |
+
attention_probs = attention_probs * weights[:, None]
|
| 102 |
+
attention_probs = attention_probs / attention_probs.sum(dim=-1, keepdim=True)
|
| 103 |
+
|
| 104 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 105 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 106 |
+
|
| 107 |
+
# linear proj
|
| 108 |
+
if isinstance(attn.processor, LoRAAttnProcessor):
|
| 109 |
+
hidden_states = attn.to_out[0](hidden_states) + lora_scale * attn.processor.to_out_lora(hidden_states)
|
| 110 |
+
else:
|
| 111 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 112 |
+
# dropout
|
| 113 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 114 |
+
|
| 115 |
+
return hidden_states
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class FabricPipeline(DiffusionPipeline):
|
| 119 |
+
r"""
|
| 120 |
+
Pipeline for text-to-image generation using Stable Diffusion and conditioning the results using feedback images.
|
| 121 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 122 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 123 |
+
|
| 124 |
+
Args:
|
| 125 |
+
vae ([`AutoencoderKL`]):
|
| 126 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 127 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 128 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 129 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 130 |
+
A `CLIPTokenizer` to tokenize text.
|
| 131 |
+
unet ([`UNet2DConditionModel`]):
|
| 132 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 133 |
+
scheduler ([`EulerAncestralDiscreteScheduler`]):
|
| 134 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 135 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 136 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 137 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 138 |
+
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
| 139 |
+
about a model's potential harms.
|
| 140 |
+
"""
|
| 141 |
+
|
| 142 |
+
def __init__(
|
| 143 |
+
self,
|
| 144 |
+
vae: AutoencoderKL,
|
| 145 |
+
text_encoder: CLIPTextModel,
|
| 146 |
+
tokenizer: CLIPTokenizer,
|
| 147 |
+
unet: UNet2DConditionModel,
|
| 148 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 149 |
+
requires_safety_checker: bool = True,
|
| 150 |
+
):
|
| 151 |
+
super().__init__()
|
| 152 |
+
|
| 153 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 154 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 155 |
+
) < version.parse("0.9.0.dev0")
|
| 156 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 157 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 158 |
+
deprecation_message = (
|
| 159 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 160 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 161 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 162 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 163 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 164 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 165 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 166 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 167 |
+
" the `unet/config.json` file"
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 171 |
+
new_config = dict(unet.config)
|
| 172 |
+
new_config["sample_size"] = 64
|
| 173 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 174 |
+
|
| 175 |
+
self.register_modules(
|
| 176 |
+
unet=unet,
|
| 177 |
+
vae=vae,
|
| 178 |
+
text_encoder=text_encoder,
|
| 179 |
+
tokenizer=tokenizer,
|
| 180 |
+
scheduler=scheduler,
|
| 181 |
+
)
|
| 182 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 183 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 184 |
+
|
| 185 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
| 186 |
+
def _encode_prompt(
|
| 187 |
+
self,
|
| 188 |
+
prompt,
|
| 189 |
+
device,
|
| 190 |
+
num_images_per_prompt,
|
| 191 |
+
do_classifier_free_guidance,
|
| 192 |
+
negative_prompt=None,
|
| 193 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 194 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 195 |
+
lora_scale: Optional[float] = None,
|
| 196 |
+
):
|
| 197 |
+
r"""
|
| 198 |
+
Encodes the prompt into text encoder hidden states.
|
| 199 |
+
|
| 200 |
+
Args:
|
| 201 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 202 |
+
prompt to be encoded
|
| 203 |
+
device: (`torch.device`):
|
| 204 |
+
torch device
|
| 205 |
+
num_images_per_prompt (`int`):
|
| 206 |
+
number of images that should be generated per prompt
|
| 207 |
+
do_classifier_free_guidance (`bool`):
|
| 208 |
+
whether to use classifier free guidance or not
|
| 209 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 210 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 211 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 212 |
+
less than `1`).
|
| 213 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 214 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 215 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 216 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 217 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 218 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 219 |
+
argument.
|
| 220 |
+
lora_scale (`float`, *optional*):
|
| 221 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 222 |
+
"""
|
| 223 |
+
# set lora scale so that monkey patched LoRA
|
| 224 |
+
# function of text encoder can correctly access it
|
| 225 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 226 |
+
self._lora_scale = lora_scale
|
| 227 |
+
|
| 228 |
+
if prompt is not None and isinstance(prompt, str):
|
| 229 |
+
batch_size = 1
|
| 230 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 231 |
+
batch_size = len(prompt)
|
| 232 |
+
else:
|
| 233 |
+
batch_size = prompt_embeds.shape[0]
|
| 234 |
+
|
| 235 |
+
if prompt_embeds is None:
|
| 236 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 237 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 238 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 239 |
+
|
| 240 |
+
text_inputs = self.tokenizer(
|
| 241 |
+
prompt,
|
| 242 |
+
padding="max_length",
|
| 243 |
+
max_length=self.tokenizer.model_max_length,
|
| 244 |
+
truncation=True,
|
| 245 |
+
return_tensors="pt",
|
| 246 |
+
)
|
| 247 |
+
text_input_ids = text_inputs.input_ids
|
| 248 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 249 |
+
|
| 250 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 251 |
+
text_input_ids, untruncated_ids
|
| 252 |
+
):
|
| 253 |
+
removed_text = self.tokenizer.batch_decode(
|
| 254 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 255 |
+
)
|
| 256 |
+
logger.warning(
|
| 257 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 258 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 262 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 263 |
+
else:
|
| 264 |
+
attention_mask = None
|
| 265 |
+
|
| 266 |
+
prompt_embeds = self.text_encoder(
|
| 267 |
+
text_input_ids.to(device),
|
| 268 |
+
attention_mask=attention_mask,
|
| 269 |
+
)
|
| 270 |
+
prompt_embeds = prompt_embeds[0]
|
| 271 |
+
|
| 272 |
+
if self.text_encoder is not None:
|
| 273 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 274 |
+
elif self.unet is not None:
|
| 275 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 276 |
+
else:
|
| 277 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 278 |
+
|
| 279 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 280 |
+
|
| 281 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 282 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 283 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 284 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 285 |
+
|
| 286 |
+
# get unconditional embeddings for classifier free guidance
|
| 287 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 288 |
+
uncond_tokens: List[str]
|
| 289 |
+
if negative_prompt is None:
|
| 290 |
+
uncond_tokens = [""] * batch_size
|
| 291 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 292 |
+
raise TypeError(
|
| 293 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 294 |
+
f" {type(prompt)}."
|
| 295 |
+
)
|
| 296 |
+
elif isinstance(negative_prompt, str):
|
| 297 |
+
uncond_tokens = [negative_prompt]
|
| 298 |
+
elif batch_size != len(negative_prompt):
|
| 299 |
+
raise ValueError(
|
| 300 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 301 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 302 |
+
" the batch size of `prompt`."
|
| 303 |
+
)
|
| 304 |
+
else:
|
| 305 |
+
uncond_tokens = negative_prompt
|
| 306 |
+
|
| 307 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 308 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 309 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 310 |
+
|
| 311 |
+
max_length = prompt_embeds.shape[1]
|
| 312 |
+
uncond_input = self.tokenizer(
|
| 313 |
+
uncond_tokens,
|
| 314 |
+
padding="max_length",
|
| 315 |
+
max_length=max_length,
|
| 316 |
+
truncation=True,
|
| 317 |
+
return_tensors="pt",
|
| 318 |
+
)
|
| 319 |
+
|
| 320 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 321 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 322 |
+
else:
|
| 323 |
+
attention_mask = None
|
| 324 |
+
|
| 325 |
+
negative_prompt_embeds = self.text_encoder(
|
| 326 |
+
uncond_input.input_ids.to(device),
|
| 327 |
+
attention_mask=attention_mask,
|
| 328 |
+
)
|
| 329 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 330 |
+
|
| 331 |
+
if do_classifier_free_guidance:
|
| 332 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 333 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 334 |
+
|
| 335 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 336 |
+
|
| 337 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 338 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 339 |
+
|
| 340 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 341 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 342 |
+
# to avoid doing two forward passes
|
| 343 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 344 |
+
|
| 345 |
+
return prompt_embeds
|
| 346 |
+
|
| 347 |
+
def get_unet_hidden_states(self, z_all, t, prompt_embd):
|
| 348 |
+
cached_hidden_states = []
|
| 349 |
+
for module in self.unet.modules():
|
| 350 |
+
if isinstance(module, BasicTransformerBlock):
|
| 351 |
+
|
| 352 |
+
def new_forward(self, hidden_states, *args, **kwargs):
|
| 353 |
+
cached_hidden_states.append(hidden_states.clone().detach().cpu())
|
| 354 |
+
return self.old_forward(hidden_states, *args, **kwargs)
|
| 355 |
+
|
| 356 |
+
module.attn1.old_forward = module.attn1.forward
|
| 357 |
+
module.attn1.forward = new_forward.__get__(module.attn1)
|
| 358 |
+
|
| 359 |
+
# run forward pass to cache hidden states, output can be discarded
|
| 360 |
+
_ = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
|
| 361 |
+
|
| 362 |
+
# restore original forward pass
|
| 363 |
+
for module in self.unet.modules():
|
| 364 |
+
if isinstance(module, BasicTransformerBlock):
|
| 365 |
+
module.attn1.forward = module.attn1.old_forward
|
| 366 |
+
del module.attn1.old_forward
|
| 367 |
+
|
| 368 |
+
return cached_hidden_states
|
| 369 |
+
|
| 370 |
+
def unet_forward_with_cached_hidden_states(
|
| 371 |
+
self,
|
| 372 |
+
z_all,
|
| 373 |
+
t,
|
| 374 |
+
prompt_embd,
|
| 375 |
+
cached_pos_hiddens: Optional[List[torch.Tensor]] = None,
|
| 376 |
+
cached_neg_hiddens: Optional[List[torch.Tensor]] = None,
|
| 377 |
+
pos_weights=(0.8, 0.8),
|
| 378 |
+
neg_weights=(0.5, 0.5),
|
| 379 |
+
):
|
| 380 |
+
if cached_pos_hiddens is None and cached_neg_hiddens is None:
|
| 381 |
+
return self.unet(z_all, t, encoder_hidden_states=prompt_embd)
|
| 382 |
+
|
| 383 |
+
local_pos_weights = torch.linspace(*pos_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
|
| 384 |
+
local_neg_weights = torch.linspace(*neg_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
|
| 385 |
+
for block, pos_weight, neg_weight in zip(
|
| 386 |
+
self.unet.down_blocks + [self.unet.mid_block] + self.unet.up_blocks,
|
| 387 |
+
local_pos_weights + [pos_weights[1]] + local_pos_weights[::-1],
|
| 388 |
+
local_neg_weights + [neg_weights[1]] + local_neg_weights[::-1],
|
| 389 |
+
):
|
| 390 |
+
for module in block.modules():
|
| 391 |
+
if isinstance(module, BasicTransformerBlock):
|
| 392 |
+
|
| 393 |
+
def new_forward(
|
| 394 |
+
self,
|
| 395 |
+
hidden_states,
|
| 396 |
+
pos_weight=pos_weight,
|
| 397 |
+
neg_weight=neg_weight,
|
| 398 |
+
**kwargs,
|
| 399 |
+
):
|
| 400 |
+
cond_hiddens, uncond_hiddens = hidden_states.chunk(2, dim=0)
|
| 401 |
+
batch_size, d_model = cond_hiddens.shape[:2]
|
| 402 |
+
device, dtype = hidden_states.device, hidden_states.dtype
|
| 403 |
+
|
| 404 |
+
weights = torch.ones(batch_size, d_model, device=device, dtype=dtype)
|
| 405 |
+
out_pos = self.old_forward(hidden_states)
|
| 406 |
+
out_neg = self.old_forward(hidden_states)
|
| 407 |
+
|
| 408 |
+
if cached_pos_hiddens is not None:
|
| 409 |
+
cached_pos_hs = cached_pos_hiddens.pop(0).to(hidden_states.device)
|
| 410 |
+
cond_pos_hs = torch.cat([cond_hiddens, cached_pos_hs], dim=1)
|
| 411 |
+
pos_weights = weights.clone().repeat(1, 1 + cached_pos_hs.shape[1] // d_model)
|
| 412 |
+
pos_weights[:, d_model:] = pos_weight
|
| 413 |
+
attn_with_weights = FabricCrossAttnProcessor()
|
| 414 |
+
out_pos = attn_with_weights(
|
| 415 |
+
self,
|
| 416 |
+
cond_hiddens,
|
| 417 |
+
encoder_hidden_states=cond_pos_hs,
|
| 418 |
+
weights=pos_weights,
|
| 419 |
+
)
|
| 420 |
+
else:
|
| 421 |
+
out_pos = self.old_forward(cond_hiddens)
|
| 422 |
+
|
| 423 |
+
if cached_neg_hiddens is not None:
|
| 424 |
+
cached_neg_hs = cached_neg_hiddens.pop(0).to(hidden_states.device)
|
| 425 |
+
uncond_neg_hs = torch.cat([uncond_hiddens, cached_neg_hs], dim=1)
|
| 426 |
+
neg_weights = weights.clone().repeat(1, 1 + cached_neg_hs.shape[1] // d_model)
|
| 427 |
+
neg_weights[:, d_model:] = neg_weight
|
| 428 |
+
attn_with_weights = FabricCrossAttnProcessor()
|
| 429 |
+
out_neg = attn_with_weights(
|
| 430 |
+
self,
|
| 431 |
+
uncond_hiddens,
|
| 432 |
+
encoder_hidden_states=uncond_neg_hs,
|
| 433 |
+
weights=neg_weights,
|
| 434 |
+
)
|
| 435 |
+
else:
|
| 436 |
+
out_neg = self.old_forward(uncond_hiddens)
|
| 437 |
+
|
| 438 |
+
out = torch.cat([out_pos, out_neg], dim=0)
|
| 439 |
+
return out
|
| 440 |
+
|
| 441 |
+
module.attn1.old_forward = module.attn1.forward
|
| 442 |
+
module.attn1.forward = new_forward.__get__(module.attn1)
|
| 443 |
+
|
| 444 |
+
out = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
|
| 445 |
+
|
| 446 |
+
# restore original forward pass
|
| 447 |
+
for module in self.unet.modules():
|
| 448 |
+
if isinstance(module, BasicTransformerBlock):
|
| 449 |
+
module.attn1.forward = module.attn1.old_forward
|
| 450 |
+
del module.attn1.old_forward
|
| 451 |
+
|
| 452 |
+
return out
|
| 453 |
+
|
| 454 |
+
def preprocess_feedback_images(self, images, vae, dim, device, dtype, generator) -> torch.Tensor:
|
| 455 |
+
images_t = [self.image_to_tensor(img, dim, dtype) for img in images]
|
| 456 |
+
images_t = torch.stack(images_t).to(device)
|
| 457 |
+
latents = vae.config.scaling_factor * vae.encode(images_t).latent_dist.sample(generator)
|
| 458 |
+
|
| 459 |
+
return torch.cat([latents], dim=0)
|
| 460 |
+
|
| 461 |
+
def check_inputs(
|
| 462 |
+
self,
|
| 463 |
+
prompt,
|
| 464 |
+
negative_prompt=None,
|
| 465 |
+
liked=None,
|
| 466 |
+
disliked=None,
|
| 467 |
+
height=None,
|
| 468 |
+
width=None,
|
| 469 |
+
):
|
| 470 |
+
if prompt is None:
|
| 471 |
+
raise ValueError("Provide `prompt`. Cannot leave both `prompt` undefined.")
|
| 472 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 473 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 474 |
+
|
| 475 |
+
if negative_prompt is not None and (
|
| 476 |
+
not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
|
| 477 |
+
):
|
| 478 |
+
raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
|
| 479 |
+
|
| 480 |
+
if liked is not None and not isinstance(liked, list):
|
| 481 |
+
raise ValueError(f"`liked` has to be of type `list` but is {type(liked)}")
|
| 482 |
+
|
| 483 |
+
if disliked is not None and not isinstance(disliked, list):
|
| 484 |
+
raise ValueError(f"`disliked` has to be of type `list` but is {type(disliked)}")
|
| 485 |
+
|
| 486 |
+
if height is not None and not isinstance(height, int):
|
| 487 |
+
raise ValueError(f"`height` has to be of type `int` but is {type(height)}")
|
| 488 |
+
|
| 489 |
+
if width is not None and not isinstance(width, int):
|
| 490 |
+
raise ValueError(f"`width` has to be of type `int` but is {type(width)}")
|
| 491 |
+
|
| 492 |
+
@torch.no_grad()
|
| 493 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 494 |
+
def __call__(
|
| 495 |
+
self,
|
| 496 |
+
prompt: Optional[Union[str, List[str]]] = "",
|
| 497 |
+
negative_prompt: Optional[Union[str, List[str]]] = "lowres, bad anatomy, bad hands, cropped, worst quality",
|
| 498 |
+
liked: Optional[Union[List[str], List[Image.Image]]] = [],
|
| 499 |
+
disliked: Optional[Union[List[str], List[Image.Image]]] = [],
|
| 500 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 501 |
+
height: int = 512,
|
| 502 |
+
width: int = 512,
|
| 503 |
+
return_dict: bool = True,
|
| 504 |
+
num_images: int = 4,
|
| 505 |
+
guidance_scale: float = 7.0,
|
| 506 |
+
num_inference_steps: int = 20,
|
| 507 |
+
output_type: Optional[str] = "pil",
|
| 508 |
+
feedback_start_ratio: float = 0.33,
|
| 509 |
+
feedback_end_ratio: float = 0.66,
|
| 510 |
+
min_weight: float = 0.05,
|
| 511 |
+
max_weight: float = 0.8,
|
| 512 |
+
neg_scale: float = 0.5,
|
| 513 |
+
pos_bottleneck_scale: float = 1.0,
|
| 514 |
+
neg_bottleneck_scale: float = 1.0,
|
| 515 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 516 |
+
):
|
| 517 |
+
r"""
|
| 518 |
+
The call function to the pipeline for generation. Generate a trajectory of images with binary feedback. The
|
| 519 |
+
feedback can be given as a list of liked and disliked images.
|
| 520 |
+
|
| 521 |
+
Args:
|
| 522 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 523 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`
|
| 524 |
+
instead.
|
| 525 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 526 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 527 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 528 |
+
liked (`List[Image.Image]` or `List[str]`, *optional*):
|
| 529 |
+
Encourages images with liked features.
|
| 530 |
+
disliked (`List[Image.Image]` or `List[str]`, *optional*):
|
| 531 |
+
Discourages images with disliked features.
|
| 532 |
+
generator (`torch.Generator` or `List[torch.Generator]` or `int`, *optional*):
|
| 533 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) or an `int` to
|
| 534 |
+
make generation deterministic.
|
| 535 |
+
height (`int`, *optional*, defaults to 512):
|
| 536 |
+
Height of the generated image.
|
| 537 |
+
width (`int`, *optional*, defaults to 512):
|
| 538 |
+
Width of the generated image.
|
| 539 |
+
num_images (`int`, *optional*, defaults to 4):
|
| 540 |
+
The number of images to generate per prompt.
|
| 541 |
+
guidance_scale (`float`, *optional*, defaults to 7.0):
|
| 542 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 543 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 544 |
+
num_inference_steps (`int`, *optional*, defaults to 20):
|
| 545 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 546 |
+
expense of slower inference.
|
| 547 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 548 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 549 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 550 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 551 |
+
plain tuple.
|
| 552 |
+
feedback_start_ratio (`float`, *optional*, defaults to `.33`):
|
| 553 |
+
Start point for providing feedback (between 0 and 1).
|
| 554 |
+
feedback_end_ratio (`float`, *optional*, defaults to `.66`):
|
| 555 |
+
End point for providing feedback (between 0 and 1).
|
| 556 |
+
min_weight (`float`, *optional*, defaults to `.05`):
|
| 557 |
+
Minimum weight for feedback.
|
| 558 |
+
max_weight (`float`, *optional*, defaults to `.8`):
|
| 559 |
+
Maximum weight for feedback.
|
| 560 |
+
neg_scale (`float`, *optional*, defaults to `.5`):
|
| 561 |
+
Scale factor for negative feedback.
|
| 562 |
+
|
| 563 |
+
Examples:
|
| 564 |
+
|
| 565 |
+
Returns:
|
| 566 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 567 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 568 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 569 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 570 |
+
"not-safe-for-work" (nsfw) content.
|
| 571 |
+
|
| 572 |
+
"""
|
| 573 |
+
|
| 574 |
+
self.check_inputs(prompt, negative_prompt, liked, disliked)
|
| 575 |
+
|
| 576 |
+
device = self._execution_device
|
| 577 |
+
dtype = self.unet.dtype
|
| 578 |
+
|
| 579 |
+
if isinstance(prompt, str) and prompt is not None:
|
| 580 |
+
batch_size = 1
|
| 581 |
+
elif isinstance(prompt, list) and prompt is not None:
|
| 582 |
+
batch_size = len(prompt)
|
| 583 |
+
else:
|
| 584 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 585 |
+
|
| 586 |
+
if isinstance(negative_prompt, str):
|
| 587 |
+
negative_prompt = negative_prompt
|
| 588 |
+
elif isinstance(negative_prompt, list):
|
| 589 |
+
negative_prompt = negative_prompt
|
| 590 |
+
else:
|
| 591 |
+
assert len(negative_prompt) == batch_size
|
| 592 |
+
|
| 593 |
+
shape = (
|
| 594 |
+
batch_size * num_images,
|
| 595 |
+
self.unet.config.in_channels,
|
| 596 |
+
height // self.vae_scale_factor,
|
| 597 |
+
width // self.vae_scale_factor,
|
| 598 |
+
)
|
| 599 |
+
latent_noise = randn_tensor(
|
| 600 |
+
shape,
|
| 601 |
+
device=device,
|
| 602 |
+
dtype=dtype,
|
| 603 |
+
generator=generator,
|
| 604 |
+
)
|
| 605 |
+
|
| 606 |
+
positive_latents = (
|
| 607 |
+
self.preprocess_feedback_images(liked, self.vae, (height, width), device, dtype, generator)
|
| 608 |
+
if liked and len(liked) > 0
|
| 609 |
+
else torch.tensor(
|
| 610 |
+
[],
|
| 611 |
+
device=device,
|
| 612 |
+
dtype=dtype,
|
| 613 |
+
)
|
| 614 |
+
)
|
| 615 |
+
negative_latents = (
|
| 616 |
+
self.preprocess_feedback_images(disliked, self.vae, (height, width), device, dtype, generator)
|
| 617 |
+
if disliked and len(disliked) > 0
|
| 618 |
+
else torch.tensor(
|
| 619 |
+
[],
|
| 620 |
+
device=device,
|
| 621 |
+
dtype=dtype,
|
| 622 |
+
)
|
| 623 |
+
)
|
| 624 |
+
|
| 625 |
+
do_classifier_free_guidance = guidance_scale > 0.1
|
| 626 |
+
|
| 627 |
+
(prompt_neg_embs, prompt_pos_embs) = self._encode_prompt(
|
| 628 |
+
prompt,
|
| 629 |
+
device,
|
| 630 |
+
num_images,
|
| 631 |
+
do_classifier_free_guidance,
|
| 632 |
+
negative_prompt,
|
| 633 |
+
).split([num_images * batch_size, num_images * batch_size])
|
| 634 |
+
|
| 635 |
+
batched_prompt_embd = torch.cat([prompt_pos_embs, prompt_neg_embs], dim=0)
|
| 636 |
+
|
| 637 |
+
null_tokens = self.tokenizer(
|
| 638 |
+
[""],
|
| 639 |
+
return_tensors="pt",
|
| 640 |
+
max_length=self.tokenizer.model_max_length,
|
| 641 |
+
padding="max_length",
|
| 642 |
+
truncation=True,
|
| 643 |
+
)
|
| 644 |
+
|
| 645 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 646 |
+
attention_mask = null_tokens.attention_mask.to(device)
|
| 647 |
+
else:
|
| 648 |
+
attention_mask = None
|
| 649 |
+
|
| 650 |
+
null_prompt_emb = self.text_encoder(
|
| 651 |
+
input_ids=null_tokens.input_ids.to(device),
|
| 652 |
+
attention_mask=attention_mask,
|
| 653 |
+
).last_hidden_state
|
| 654 |
+
|
| 655 |
+
null_prompt_emb = null_prompt_emb.to(device=device, dtype=dtype)
|
| 656 |
+
|
| 657 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 658 |
+
timesteps = self.scheduler.timesteps
|
| 659 |
+
latent_noise = latent_noise * self.scheduler.init_noise_sigma
|
| 660 |
+
|
| 661 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 662 |
+
|
| 663 |
+
ref_start_idx = round(len(timesteps) * feedback_start_ratio)
|
| 664 |
+
ref_end_idx = round(len(timesteps) * feedback_end_ratio)
|
| 665 |
+
|
| 666 |
+
with self.progress_bar(total=num_inference_steps) as pbar:
|
| 667 |
+
for i, t in enumerate(timesteps):
|
| 668 |
+
sigma = self.scheduler.sigma_t[t] if hasattr(self.scheduler, "sigma_t") else 0
|
| 669 |
+
if hasattr(self.scheduler, "sigmas"):
|
| 670 |
+
sigma = self.scheduler.sigmas[i]
|
| 671 |
+
|
| 672 |
+
alpha_hat = 1 / (sigma**2 + 1)
|
| 673 |
+
|
| 674 |
+
z_single = self.scheduler.scale_model_input(latent_noise, t)
|
| 675 |
+
z_all = torch.cat([z_single] * 2, dim=0)
|
| 676 |
+
z_ref = torch.cat([positive_latents, negative_latents], dim=0)
|
| 677 |
+
|
| 678 |
+
if i >= ref_start_idx and i <= ref_end_idx:
|
| 679 |
+
weight_factor = max_weight
|
| 680 |
+
else:
|
| 681 |
+
weight_factor = min_weight
|
| 682 |
+
|
| 683 |
+
pos_ws = (weight_factor, weight_factor * pos_bottleneck_scale)
|
| 684 |
+
neg_ws = (weight_factor * neg_scale, weight_factor * neg_scale * neg_bottleneck_scale)
|
| 685 |
+
|
| 686 |
+
if z_ref.size(0) > 0 and weight_factor > 0:
|
| 687 |
+
noise = torch.randn_like(z_ref)
|
| 688 |
+
if isinstance(self.scheduler, EulerAncestralDiscreteScheduler):
|
| 689 |
+
z_ref_noised = (alpha_hat**0.5 * z_ref + (1 - alpha_hat) ** 0.5 * noise).type(dtype)
|
| 690 |
+
else:
|
| 691 |
+
z_ref_noised = self.scheduler.add_noise(z_ref, noise, t)
|
| 692 |
+
|
| 693 |
+
ref_prompt_embd = torch.cat(
|
| 694 |
+
[null_prompt_emb] * (len(positive_latents) + len(negative_latents)), dim=0
|
| 695 |
+
)
|
| 696 |
+
cached_hidden_states = self.get_unet_hidden_states(z_ref_noised, t, ref_prompt_embd)
|
| 697 |
+
|
| 698 |
+
n_pos, n_neg = positive_latents.shape[0], negative_latents.shape[0]
|
| 699 |
+
cached_pos_hs, cached_neg_hs = [], []
|
| 700 |
+
for hs in cached_hidden_states:
|
| 701 |
+
cached_pos, cached_neg = hs.split([n_pos, n_neg], dim=0)
|
| 702 |
+
cached_pos = cached_pos.view(1, -1, *cached_pos.shape[2:]).expand(num_images, -1, -1)
|
| 703 |
+
cached_neg = cached_neg.view(1, -1, *cached_neg.shape[2:]).expand(num_images, -1, -1)
|
| 704 |
+
cached_pos_hs.append(cached_pos)
|
| 705 |
+
cached_neg_hs.append(cached_neg)
|
| 706 |
+
|
| 707 |
+
if n_pos == 0:
|
| 708 |
+
cached_pos_hs = None
|
| 709 |
+
if n_neg == 0:
|
| 710 |
+
cached_neg_hs = None
|
| 711 |
+
else:
|
| 712 |
+
cached_pos_hs, cached_neg_hs = None, None
|
| 713 |
+
unet_out = self.unet_forward_with_cached_hidden_states(
|
| 714 |
+
z_all,
|
| 715 |
+
t,
|
| 716 |
+
prompt_embd=batched_prompt_embd,
|
| 717 |
+
cached_pos_hiddens=cached_pos_hs,
|
| 718 |
+
cached_neg_hiddens=cached_neg_hs,
|
| 719 |
+
pos_weights=pos_ws,
|
| 720 |
+
neg_weights=neg_ws,
|
| 721 |
+
)[0]
|
| 722 |
+
|
| 723 |
+
noise_cond, noise_uncond = unet_out.chunk(2)
|
| 724 |
+
guidance = noise_cond - noise_uncond
|
| 725 |
+
noise_pred = noise_uncond + guidance_scale * guidance
|
| 726 |
+
latent_noise = self.scheduler.step(noise_pred, t, latent_noise)[0]
|
| 727 |
+
|
| 728 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 729 |
+
pbar.update()
|
| 730 |
+
|
| 731 |
+
y = self.vae.decode(latent_noise / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 732 |
+
imgs = self.image_processor.postprocess(
|
| 733 |
+
y,
|
| 734 |
+
output_type=output_type,
|
| 735 |
+
)
|
| 736 |
+
|
| 737 |
+
if not return_dict:
|
| 738 |
+
return imgs
|
| 739 |
+
|
| 740 |
+
return StableDiffusionPipelineOutput(imgs, False)
|
| 741 |
+
|
| 742 |
+
def image_to_tensor(self, image: Union[str, Image.Image], dim: tuple, dtype):
|
| 743 |
+
"""
|
| 744 |
+
Convert a PIL image (or a path to one) to a torch tensor for further processing.
|
| 745 |
+
"""
|
| 746 |
+
if isinstance(image, str):
|
| 747 |
+
image = Image.open(image)
|
| 748 |
+
if not image.mode == "RGB":
|
| 749 |
+
image = image.convert("RGB")
|
| 750 |
+
image = self.image_processor.preprocess(image, height=dim[0], width=dim[1])[0]
|
| 751 |
+
return image.type(dtype)
|
v0.27.0/pipeline_null_text_inversion.py
ADDED
|
@@ -0,0 +1,260 @@
|
| 1 |
+
import inspect
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as nnf
|
| 7 |
+
from PIL import Image
|
| 8 |
+
from torch.optim.adam import Adam
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
from diffusers import StableDiffusionPipeline
|
| 12 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def retrieve_timesteps(
|
| 16 |
+
scheduler,
|
| 17 |
+
num_inference_steps=None,
|
| 18 |
+
device=None,
|
| 19 |
+
timesteps=None,
|
| 20 |
+
**kwargs,
|
| 21 |
+
):
|
| 22 |
+
"""
|
| 23 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 24 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 25 |
+
Args:
|
| 26 |
+
scheduler (`SchedulerMixin`):
|
| 27 |
+
The scheduler to get timesteps from.
|
| 28 |
+
num_inference_steps (`int`):
|
| 29 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 30 |
+
`timesteps` must be `None`.
|
| 31 |
+
device (`str` or `torch.device`, *optional*):
|
| 32 |
+
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
|
| 33 |
+
timesteps (`List[int]`, *optional*):
|
| 34 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 35 |
+
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
|
| 36 |
+
must be `None`.
|
| 37 |
+
|
| 38 |
+
Returns:
|
| 39 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 40 |
+
second element is the number of inference steps.
|
| 41 |
+
"""
|
| 42 |
+
if timesteps is not None:
|
| 43 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 44 |
+
if not accepts_timesteps:
|
| 45 |
+
raise ValueError(
|
| 46 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 47 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 48 |
+
)
|
| 49 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 50 |
+
timesteps = scheduler.timesteps
|
| 51 |
+
num_inference_steps = len(timesteps)
|
| 52 |
+
else:
|
| 53 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 54 |
+
timesteps = scheduler.timesteps
|
| 55 |
+
return timesteps, num_inference_steps
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class NullTextPipeline(StableDiffusionPipeline):
|
| 59 |
+
def get_noise_pred(self, latents, t, context):
|
| 60 |
+
latents_input = torch.cat([latents] * 2)
|
| 61 |
+
guidance_scale = 7.5
|
| 62 |
+
noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"]
|
| 63 |
+
noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
|
| 64 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
|
| 65 |
+
latents = self.prev_step(noise_pred, t, latents)
|
| 66 |
+
return latents
|
| 67 |
+
|
| 68 |
+
def get_noise_pred_single(self, latents, t, context):
|
| 69 |
+
noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"]
|
| 70 |
+
return noise_pred
|
| 71 |
+
|
| 72 |
+
@torch.no_grad()
|
| 73 |
+
def image2latent(self, image_path):
|
| 74 |
+
image = Image.open(image_path).convert("RGB")
|
| 75 |
+
image = np.array(image)
|
| 76 |
+
image = torch.from_numpy(image).float() / 127.5 - 1
|
| 77 |
+
image = image.permute(2, 0, 1).unsqueeze(0).to(self.device)
|
| 78 |
+
latents = self.vae.encode(image)["latent_dist"].mean
|
| 79 |
+
latents = latents * 0.18215
|
| 80 |
+
return latents
|
| 81 |
+
|
| 82 |
+
@torch.no_grad()
|
| 83 |
+
def latent2image(self, latents):
|
| 84 |
+
latents = 1 / 0.18215 * latents.detach()
|
| 85 |
+
image = self.vae.decode(latents)["sample"].detach()
|
| 86 |
+
image = self.processor.postprocess(image, output_type="pil")[0]
|
| 87 |
+
return image
|
| 88 |
+
|
| 89 |
+
def prev_step(self, model_output, timestep, sample):
|
| 90 |
+
prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
|
| 91 |
+
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
|
| 92 |
+
alpha_prod_t_prev = (
|
| 93 |
+
self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
|
| 94 |
+
)
|
| 95 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 96 |
+
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
|
| 97 |
+
pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
|
| 98 |
+
prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction
|
| 99 |
+
return prev_sample
|
| 100 |
+
|
| 101 |
+
def next_step(self, model_output, timestep, sample):
|
| 102 |
+
timestep, next_timestep = (
|
| 103 |
+
min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999),
|
| 104 |
+
timestep,
|
| 105 |
+
)
|
| 106 |
+
alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
|
| 107 |
+
alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep]
|
| 108 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 109 |
+
next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
|
| 110 |
+
next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
|
| 111 |
+
next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction
|
| 112 |
+
return next_sample
|
| 113 |
+
|
| 114 |
+
def null_optimization(self, latents, context, num_inner_steps, epsilon):
|
| 115 |
+
uncond_embeddings, cond_embeddings = context.chunk(2)
|
| 116 |
+
uncond_embeddings_list = []
|
| 117 |
+
latent_cur = latents[-1]
|
| 118 |
+
bar = tqdm(total=num_inner_steps * self.num_inference_steps)
|
| 119 |
+
for i in range(self.num_inference_steps):
|
| 120 |
+
uncond_embeddings = uncond_embeddings.clone().detach()
|
| 121 |
+
uncond_embeddings.requires_grad = True
|
| 122 |
+
optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0))
|
| 123 |
+
latent_prev = latents[len(latents) - i - 2]
|
| 124 |
+
t = self.scheduler.timesteps[i]
|
| 125 |
+
with torch.no_grad():
|
| 126 |
+
noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings)
|
| 127 |
+
for j in range(num_inner_steps):
|
| 128 |
+
noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings)
|
| 129 |
+
noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond)
|
| 130 |
+
latents_prev_rec = self.prev_step(noise_pred, t, latent_cur)
|
| 131 |
+
loss = nnf.mse_loss(latents_prev_rec, latent_prev)
|
| 132 |
+
optimizer.zero_grad()
|
| 133 |
+
loss.backward()
|
| 134 |
+
optimizer.step()
|
| 135 |
+
loss_item = loss.item()
|
| 136 |
+
bar.update()
|
| 137 |
+
if loss_item < epsilon + i * 2e-5:
|
| 138 |
+
break
|
| 139 |
+
for j in range(j + 1, num_inner_steps):
|
| 140 |
+
bar.update()
|
| 141 |
+
uncond_embeddings_list.append(uncond_embeddings[:1].detach())
|
| 142 |
+
with torch.no_grad():
|
| 143 |
+
context = torch.cat([uncond_embeddings, cond_embeddings])
|
| 144 |
+
latent_cur = self.get_noise_pred(latent_cur, t, context)
|
| 145 |
+
bar.close()
|
| 146 |
+
return uncond_embeddings_list
|
| 147 |
+
|
| 148 |
+
@torch.no_grad()
|
| 149 |
+
def ddim_inversion_loop(self, latent, context):
|
| 150 |
+
self.scheduler.set_timesteps(self.num_inference_steps)
|
| 151 |
+
_, cond_embeddings = context.chunk(2)
|
| 152 |
+
all_latent = [latent]
|
| 153 |
+
latent = latent.clone().detach()
|
| 154 |
+
with torch.no_grad():
|
| 155 |
+
for i in range(0, self.num_inference_steps):
|
| 156 |
+
t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1]
|
| 157 |
+
noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"]
|
| 158 |
+
latent = self.next_step(noise_pred, t, latent)
|
| 159 |
+
all_latent.append(latent)
|
| 160 |
+
return all_latent
|
| 161 |
+
|
| 162 |
+
def get_context(self, prompt):
|
| 163 |
+
uncond_input = self.tokenizer(
|
| 164 |
+
[""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
|
| 165 |
+
)
|
| 166 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 167 |
+
text_input = self.tokenizer(
|
| 168 |
+
[prompt],
|
| 169 |
+
padding="max_length",
|
| 170 |
+
max_length=self.tokenizer.model_max_length,
|
| 171 |
+
truncation=True,
|
| 172 |
+
return_tensors="pt",
|
| 173 |
+
)
|
| 174 |
+
text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
|
| 175 |
+
context = torch.cat([uncond_embeddings, text_embeddings])
|
| 176 |
+
return context
|
| 177 |
+
|
| 178 |
+
def invert(
|
| 179 |
+
self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50
|
| 180 |
+
):
|
| 181 |
+
self.num_inference_steps = num_inference_steps
|
| 182 |
+
context = self.get_context(prompt)
|
| 183 |
+
latent = self.image2latent(image_path)
|
| 184 |
+
ddim_latents = self.ddim_inversion_loop(latent, context)
|
| 185 |
+
if os.path.exists(image_path + ".pt"):
|
| 186 |
+
uncond_embeddings = torch.load(image_path + ".pt")
|
| 187 |
+
else:
|
| 188 |
+
uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon)
|
| 189 |
+
uncond_embeddings = torch.stack(uncond_embeddings, 0)
|
| 190 |
+
torch.save(uncond_embeddings, image_path + ".pt")
|
| 191 |
+
return ddim_latents[-1], uncond_embeddings
|
| 192 |
+
|
| 193 |
+
@torch.no_grad()
|
| 194 |
+
def __call__(
|
| 195 |
+
self,
|
| 196 |
+
prompt,
|
| 197 |
+
uncond_embeddings,
|
| 198 |
+
inverted_latent,
|
| 199 |
+
num_inference_steps: int = 50,
|
| 200 |
+
timesteps=None,
|
| 201 |
+
guidance_scale=7.5,
|
| 202 |
+
negative_prompt=None,
|
| 203 |
+
num_images_per_prompt=1,
|
| 204 |
+
generator=None,
|
| 205 |
+
latents=None,
|
| 206 |
+
prompt_embeds=None,
|
| 207 |
+
negative_prompt_embeds=None,
|
| 208 |
+
output_type="pil",
|
| 209 |
+
):
|
| 210 |
+
self._guidance_scale = guidance_scale
|
| 211 |
+
# 0. Default height and width to unet
|
| 212 |
+
height = self.unet.config.sample_size * self.vae_scale_factor
|
| 213 |
+
width = self.unet.config.sample_size * self.vae_scale_factor
|
| 214 |
+
# to deal with lora scaling and other possible forward hook
|
| 215 |
+
callback_steps = None
|
| 216 |
+
# 1. Check inputs. Raise error if not correct
|
| 217 |
+
self.check_inputs(
|
| 218 |
+
prompt,
|
| 219 |
+
height,
|
| 220 |
+
width,
|
| 221 |
+
callback_steps,
|
| 222 |
+
negative_prompt,
|
| 223 |
+
prompt_embeds,
|
| 224 |
+
negative_prompt_embeds,
|
| 225 |
+
)
|
| 226 |
+
# 2. Define call parameter
|
| 227 |
+
device = self._execution_device
|
| 228 |
+
# 3. Encode input prompt
|
| 229 |
+
prompt_embeds, _ = self.encode_prompt(
|
| 230 |
+
prompt,
|
| 231 |
+
device,
|
| 232 |
+
num_images_per_prompt,
|
| 233 |
+
self.do_classifier_free_guidance,
|
| 234 |
+
negative_prompt,
|
| 235 |
+
prompt_embeds=prompt_embeds,
|
| 236 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 237 |
+
)
|
| 238 |
+
# 4. Prepare timesteps
|
| 239 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 240 |
+
latents = inverted_latent
|
| 241 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 242 |
+
for i, t in enumerate(timesteps):
|
| 243 |
+
noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"]
|
| 244 |
+
noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"]
|
| 245 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
|
| 246 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 247 |
+
latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
|
| 248 |
+
progress_bar.update()
|
| 249 |
+
if not output_type == "latent":
|
| 250 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
|
| 251 |
+
0
|
| 252 |
+
]
|
| 253 |
+
else:
|
| 254 |
+
image = latents
|
| 255 |
+
image = self.image_processor.postprocess(
|
| 256 |
+
image, output_type=output_type, do_denormalize=[True] * image.shape[0]
|
| 257 |
+
)
|
| 258 |
+
# Offload all models
|
| 259 |
+
self.maybe_free_model_hooks()
|
| 260 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
|
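A usage note for `NullTextPipeline` above: first invert a real image together with its caption, then regenerate it from the inverted latent and the optimized null-text embeddings. Below is a minimal sketch; the Stable Diffusion 1.5 checkpoint, the DDIM scheduler settings, the input image `./cat.png`, and its caption are all assumptions for illustration.

import torch
from diffusers import DDIMScheduler, DiffusionPipeline

scheduler = DDIMScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    scheduler=scheduler,
    custom_pipeline="pipeline_null_text_inversion",
).to("cuda")

prompt = "a photo of a cat sitting on a wooden bench"  # caption describing the input image
image_path = "./cat.png"  # hypothetical input image

# DDIM inversion followed by null-text optimization (one optimized
# unconditional embedding per denoising step, cached next to the image).
inverted_latent, uncond_embeddings = pipe.invert(
    image_path, prompt, num_inner_steps=10, num_inference_steps=50
)

# Reconstruct the image from the inverted latent and the optimized embeddings.
out = pipe(prompt, uncond_embeddings, inverted_latent, guidance_scale=7.5, num_inference_steps=50)
out.images[0].save("reconstruction.png")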
v0.27.0/pipeline_prompt2prompt.py
ADDED
|
@@ -0,0 +1,1422 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import abc
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from packaging import version
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
from diffusers.configuration_utils import FrozenDict, deprecate
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    LoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models.attention import Attention
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    USE_PEFT_BACKEND,
    logging,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg

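# Illustrative helper (not part of the original file): a quick sanity check of
# rescale_noise_cfg above. With guidance_rescale=1.0 the rescaled CFG prediction's
# per-sample std matches the std of the text-conditional prediction, which is the
# overexposure fix described in the referenced paper. Shapes are arbitrary example values.
def _rescale_noise_cfg_example():
    noise_pred_text = torch.randn(2, 4, 64, 64)
    noise_pred_uncond = torch.randn(2, 4, 64, 64)
    noise_cfg = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
    rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
    # the per-sample standard deviations should now agree up to floating point error
    assert torch.allclose(rescaled.std(dim=(1, 2, 3)), noise_pred_text.std(dim=(1, 2, 3)), rtol=1e-4)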
| 75 |
+
class Prompt2PromptPipeline(
|
| 76 |
+
DiffusionPipeline,
|
| 77 |
+
TextualInversionLoaderMixin,
|
| 78 |
+
LoraLoaderMixin,
|
| 79 |
+
IPAdapterMixin,
|
| 80 |
+
FromSingleFileMixin,
|
| 81 |
+
):
|
| 82 |
+
r"""
|
| 83 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 84 |
+
|
| 85 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 86 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 87 |
+
|
| 88 |
+
The pipeline also inherits the following loading methods:
|
| 89 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 90 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 91 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 92 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 93 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 94 |
+
|
| 95 |
+
Args:
|
| 96 |
+
vae ([`AutoencoderKL`]):
|
| 97 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 98 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 99 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 100 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 101 |
+
A `CLIPTokenizer` to tokenize text.
|
| 102 |
+
unet ([`UNet2DConditionModel`]):
|
| 103 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 104 |
+
scheduler ([`SchedulerMixin`]):
|
| 105 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 106 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 107 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 108 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 109 |
+
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
| 110 |
+
about a model's potential harms.
|
| 111 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 112 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 113 |
+
"""
|
| 114 |
+
|
| 115 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 116 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 117 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 118 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 119 |
+
|
| 120 |
+
def __init__(
|
| 121 |
+
self,
|
| 122 |
+
vae: AutoencoderKL,
|
| 123 |
+
text_encoder: CLIPTextModel,
|
| 124 |
+
tokenizer: CLIPTokenizer,
|
| 125 |
+
unet: UNet2DConditionModel,
|
| 126 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 127 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 128 |
+
feature_extractor: CLIPImageProcessor,
|
| 129 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 130 |
+
requires_safety_checker: bool = True,
|
| 131 |
+
):
|
| 132 |
+
super().__init__()
|
| 133 |
+
|
| 134 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 135 |
+
deprecation_message = (
|
| 136 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 137 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 138 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 139 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 140 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 141 |
+
" file"
|
| 142 |
+
)
|
| 143 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 144 |
+
new_config = dict(scheduler.config)
|
| 145 |
+
new_config["steps_offset"] = 1
|
| 146 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 147 |
+
|
| 148 |
+
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
| 149 |
+
deprecation_message = (
|
| 150 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 151 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 152 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 153 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 154 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 155 |
+
)
|
| 156 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 157 |
+
new_config = dict(scheduler.config)
|
| 158 |
+
new_config["clip_sample"] = False
|
| 159 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 160 |
+
|
| 161 |
+
if safety_checker is None and requires_safety_checker:
|
| 162 |
+
logger.warning(
|
| 163 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 164 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 165 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 166 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 167 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 168 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
if safety_checker is not None and feature_extractor is None:
|
| 172 |
+
raise ValueError(
|
| 173 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 174 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 175 |
+
)
|
| 176 |
+
|
| 177 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 178 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 179 |
+
) < version.parse("0.9.0.dev0")
|
| 180 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 181 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 182 |
+
deprecation_message = (
|
| 183 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 184 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 185 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 186 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 187 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 188 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 189 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 190 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 191 |
+
" the `unet/config.json` file"
|
| 192 |
+
)
|
| 193 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 194 |
+
new_config = dict(unet.config)
|
| 195 |
+
new_config["sample_size"] = 64
|
| 196 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 197 |
+
|
| 198 |
+
self.register_modules(
|
| 199 |
+
vae=vae,
|
| 200 |
+
text_encoder=text_encoder,
|
| 201 |
+
tokenizer=tokenizer,
|
| 202 |
+
unet=unet,
|
| 203 |
+
scheduler=scheduler,
|
| 204 |
+
safety_checker=safety_checker,
|
| 205 |
+
feature_extractor=feature_extractor,
|
| 206 |
+
image_encoder=image_encoder,
|
| 207 |
+
)
|
| 208 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 209 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 210 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 211 |
+
|
| 212 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
| 213 |
+
def _encode_prompt(
|
| 214 |
+
self,
|
| 215 |
+
prompt,
|
| 216 |
+
device,
|
| 217 |
+
num_images_per_prompt,
|
| 218 |
+
do_classifier_free_guidance,
|
| 219 |
+
negative_prompt=None,
|
| 220 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 221 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 222 |
+
lora_scale: Optional[float] = None,
|
| 223 |
+
**kwargs,
|
| 224 |
+
):
|
| 225 |
+
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
|
| 226 |
+
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
|
| 227 |
+
|
| 228 |
+
prompt_embeds_tuple = self.encode_prompt(
|
| 229 |
+
prompt=prompt,
|
| 230 |
+
device=device,
|
| 231 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 232 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 233 |
+
negative_prompt=negative_prompt,
|
| 234 |
+
prompt_embeds=prompt_embeds,
|
| 235 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 236 |
+
lora_scale=lora_scale,
|
| 237 |
+
**kwargs,
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
# concatenate for backwards comp
|
| 241 |
+
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
|
| 242 |
+
|
| 243 |
+
return prompt_embeds
|
| 244 |
+
|
| 245 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
|
| 246 |
+
def encode_prompt(
|
| 247 |
+
self,
|
| 248 |
+
prompt,
|
| 249 |
+
device,
|
| 250 |
+
num_images_per_prompt,
|
| 251 |
+
do_classifier_free_guidance,
|
| 252 |
+
negative_prompt=None,
|
| 253 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 254 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 255 |
+
lora_scale: Optional[float] = None,
|
| 256 |
+
clip_skip: Optional[int] = None,
|
| 257 |
+
):
|
| 258 |
+
r"""
|
| 259 |
+
Encodes the prompt into text encoder hidden states.
|
| 260 |
+
|
| 261 |
+
Args:
|
| 262 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 263 |
+
prompt to be encoded
|
| 264 |
+
device: (`torch.device`):
|
| 265 |
+
torch device
|
| 266 |
+
num_images_per_prompt (`int`):
|
| 267 |
+
number of images that should be generated per prompt
|
| 268 |
+
do_classifier_free_guidance (`bool`):
|
| 269 |
+
whether to use classifier free guidance or not
|
| 270 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 271 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 272 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 273 |
+
less than `1`).
|
| 274 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 275 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 276 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 277 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 278 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 279 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 280 |
+
argument.
|
| 281 |
+
lora_scale (`float`, *optional*):
|
| 282 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 283 |
+
clip_skip (`int`, *optional*):
|
| 284 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 285 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 286 |
+
"""
|
| 287 |
+
# set lora scale so that monkey patched LoRA
|
| 288 |
+
# function of text encoder can correctly access it
|
| 289 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 290 |
+
self._lora_scale = lora_scale
|
| 291 |
+
|
| 292 |
+
# dynamically adjust the LoRA scale
|
| 293 |
+
if not USE_PEFT_BACKEND:
|
| 294 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 295 |
+
else:
|
| 296 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 297 |
+
|
| 298 |
+
if prompt is not None and isinstance(prompt, str):
|
| 299 |
+
batch_size = 1
|
| 300 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 301 |
+
batch_size = len(prompt)
|
| 302 |
+
else:
|
| 303 |
+
batch_size = prompt_embeds.shape[0]
|
| 304 |
+
|
| 305 |
+
if prompt_embeds is None:
|
| 306 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 307 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 308 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 309 |
+
|
| 310 |
+
text_inputs = self.tokenizer(
|
| 311 |
+
prompt,
|
| 312 |
+
padding="max_length",
|
| 313 |
+
max_length=self.tokenizer.model_max_length,
|
| 314 |
+
truncation=True,
|
| 315 |
+
return_tensors="pt",
|
| 316 |
+
)
|
| 317 |
+
text_input_ids = text_inputs.input_ids
|
| 318 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 319 |
+
|
| 320 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 321 |
+
text_input_ids, untruncated_ids
|
| 322 |
+
):
|
| 323 |
+
removed_text = self.tokenizer.batch_decode(
|
| 324 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 325 |
+
)
|
| 326 |
+
logger.warning(
|
| 327 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 328 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 329 |
+
)
|
| 330 |
+
|
| 331 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 332 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 333 |
+
else:
|
| 334 |
+
attention_mask = None
|
| 335 |
+
|
| 336 |
+
if clip_skip is None:
|
| 337 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 338 |
+
prompt_embeds = prompt_embeds[0]
|
| 339 |
+
else:
|
| 340 |
+
prompt_embeds = self.text_encoder(
|
| 341 |
+
text_input_ids.to(device),
|
| 342 |
+
attention_mask=attention_mask,
|
| 343 |
+
output_hidden_states=True,
|
| 344 |
+
)
|
| 345 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 346 |
+
# all the hidden states from the encoder layers. Then index into
|
| 347 |
+
# the tuple to access the hidden states from the desired layer.
|
| 348 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 349 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 350 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 351 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 352 |
+
# layer.
|
| 353 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 354 |
+
|
| 355 |
+
if self.text_encoder is not None:
|
| 356 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 357 |
+
elif self.unet is not None:
|
| 358 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 359 |
+
else:
|
| 360 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 361 |
+
|
| 362 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 363 |
+
|
| 364 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 365 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 366 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 367 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 368 |
+
|
| 369 |
+
# get unconditional embeddings for classifier free guidance
|
| 370 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 371 |
+
uncond_tokens: List[str]
|
| 372 |
+
if negative_prompt is None:
|
| 373 |
+
uncond_tokens = [""] * batch_size
|
| 374 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 375 |
+
raise TypeError(
|
| 376 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 377 |
+
f" {type(prompt)}."
|
| 378 |
+
)
|
| 379 |
+
elif isinstance(negative_prompt, str):
|
| 380 |
+
uncond_tokens = [negative_prompt]
|
| 381 |
+
elif batch_size != len(negative_prompt):
|
| 382 |
+
raise ValueError(
|
| 383 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 384 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 385 |
+
" the batch size of `prompt`."
|
| 386 |
+
)
|
| 387 |
+
else:
|
| 388 |
+
uncond_tokens = negative_prompt
|
| 389 |
+
|
| 390 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 391 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 392 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 393 |
+
|
| 394 |
+
max_length = prompt_embeds.shape[1]
|
| 395 |
+
uncond_input = self.tokenizer(
|
| 396 |
+
uncond_tokens,
|
| 397 |
+
padding="max_length",
|
| 398 |
+
max_length=max_length,
|
| 399 |
+
truncation=True,
|
| 400 |
+
return_tensors="pt",
|
| 401 |
+
)
|
| 402 |
+
|
| 403 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 404 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 405 |
+
else:
|
| 406 |
+
attention_mask = None
|
| 407 |
+
|
| 408 |
+
negative_prompt_embeds = self.text_encoder(
|
| 409 |
+
uncond_input.input_ids.to(device),
|
| 410 |
+
attention_mask=attention_mask,
|
| 411 |
+
)
|
| 412 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 413 |
+
|
| 414 |
+
if do_classifier_free_guidance:
|
| 415 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 416 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 417 |
+
|
| 418 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 419 |
+
|
| 420 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 421 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 422 |
+
|
| 423 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 424 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 425 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 426 |
+
|
| 427 |
+
return prompt_embeds, negative_prompt_embeds
|
| 428 |
+
|
| 429 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
| 430 |
+
def run_safety_checker(self, image, device, dtype):
|
| 431 |
+
if self.safety_checker is None:
|
| 432 |
+
has_nsfw_concept = None
|
| 433 |
+
else:
|
| 434 |
+
if torch.is_tensor(image):
|
| 435 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 436 |
+
else:
|
| 437 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 438 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 439 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 440 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 441 |
+
)
|
| 442 |
+
return image, has_nsfw_concept
|
| 443 |
+
|
| 444 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 445 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 446 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 447 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 448 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 449 |
+
# and should be between [0, 1]
|
| 450 |
+
|
| 451 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 452 |
+
extra_step_kwargs = {}
|
| 453 |
+
if accepts_eta:
|
| 454 |
+
extra_step_kwargs["eta"] = eta
|
| 455 |
+
|
| 456 |
+
# check if the scheduler accepts generator
|
| 457 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 458 |
+
if accepts_generator:
|
| 459 |
+
extra_step_kwargs["generator"] = generator
|
| 460 |
+
return extra_step_kwargs
|
| 461 |
+
|
| 462 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
|
| 463 |
+
def check_inputs(
|
| 464 |
+
self,
|
| 465 |
+
prompt,
|
| 466 |
+
height,
|
| 467 |
+
width,
|
| 468 |
+
callback_steps,
|
| 469 |
+
negative_prompt=None,
|
| 470 |
+
prompt_embeds=None,
|
| 471 |
+
negative_prompt_embeds=None,
|
| 472 |
+
ip_adapter_image=None,
|
| 473 |
+
ip_adapter_image_embeds=None,
|
| 474 |
+
callback_on_step_end_tensor_inputs=None,
|
| 475 |
+
):
|
| 476 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 477 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 478 |
+
|
| 479 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 480 |
+
raise ValueError(
|
| 481 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 482 |
+
f" {type(callback_steps)}."
|
| 483 |
+
)
|
| 484 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 485 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 486 |
+
):
|
| 487 |
+
raise ValueError(
|
| 488 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 489 |
+
)
|
| 490 |
+
|
| 491 |
+
if prompt is not None and prompt_embeds is not None:
|
| 492 |
+
raise ValueError(
|
| 493 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 494 |
+
" only forward one of the two."
|
| 495 |
+
)
|
| 496 |
+
elif prompt is None and prompt_embeds is None:
|
| 497 |
+
raise ValueError(
|
| 498 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 499 |
+
)
|
| 500 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 501 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 502 |
+
|
| 503 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 504 |
+
raise ValueError(
|
| 505 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 506 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 507 |
+
)
|
| 508 |
+
|
| 509 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 510 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 511 |
+
raise ValueError(
|
| 512 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 513 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 514 |
+
f" {negative_prompt_embeds.shape}."
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
|
| 518 |
+
raise ValueError(
|
| 519 |
+
"Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
|
| 520 |
+
)
|
| 521 |
+
|
| 522 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 523 |
+
def prepare_latents(
|
| 524 |
+
self,
|
| 525 |
+
batch_size,
|
| 526 |
+
num_channels_latents,
|
| 527 |
+
height,
|
| 528 |
+
width,
|
| 529 |
+
dtype,
|
| 530 |
+
device,
|
| 531 |
+
generator,
|
| 532 |
+
latents=None,
|
| 533 |
+
):
|
| 534 |
+
shape = (
|
| 535 |
+
batch_size,
|
| 536 |
+
num_channels_latents,
|
| 537 |
+
height // self.vae_scale_factor,
|
| 538 |
+
width // self.vae_scale_factor,
|
| 539 |
+
)
|
| 540 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 541 |
+
raise ValueError(
|
| 542 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 543 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 544 |
+
)
|
| 545 |
+
|
| 546 |
+
if latents is None:
|
| 547 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 548 |
+
else:
|
| 549 |
+
latents = latents.to(device)
|
| 550 |
+
|
| 551 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 552 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 553 |
+
return latents
|
| 554 |
+
|
| 555 |
+
@torch.no_grad()
|
| 556 |
+
def __call__(
|
| 557 |
+
self,
|
| 558 |
+
prompt: Union[str, List[str]],
|
| 559 |
+
height: Optional[int] = None,
|
| 560 |
+
width: Optional[int] = None,
|
| 561 |
+
num_inference_steps: int = 50,
|
| 562 |
+
guidance_scale: float = 7.5,
|
| 563 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 564 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 565 |
+
eta: float = 0.0,
|
| 566 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 567 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 568 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 569 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 570 |
+
output_type: Optional[str] = "pil",
|
| 571 |
+
return_dict: bool = True,
|
| 572 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 573 |
+
callback_steps: Optional[int] = 1,
|
| 574 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 575 |
+
guidance_rescale: float = 0.0,
|
| 576 |
+
):
|
| 577 |
+
r"""
|
| 578 |
+
Function invoked when calling the pipeline for generation.
|
| 579 |
+
|
| 580 |
+
Args:
|
| 581 |
+
prompt (`str` or `List[str]`):
|
| 582 |
+
The prompt or prompts to guide the image generation.
|
| 583 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 584 |
+
The height in pixels of the generated image.
|
| 585 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 586 |
+
The width in pixels of the generated image.
|
| 587 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 588 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 589 |
+
expense of slower inference.
|
| 590 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 591 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 592 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 593 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 594 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 595 |
+
usually at the expense of lower image quality.
|
| 596 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 597 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 598 |
+
if `guidance_scale` is less than `1`).
|
| 599 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 600 |
+
The number of images to generate per prompt.
|
| 601 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 602 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 603 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 604 |
+
generator (`torch.Generator`, *optional*):
|
| 605 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 606 |
+
to make generation deterministic.
|
| 607 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 608 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 609 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 610 |
+
tensor will ge generated by sampling using the supplied random `generator`.
|
| 611 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 612 |
+
The output format of the generate image. Choose between
|
| 613 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 614 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 615 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 616 |
+
plain tuple.
|
| 617 |
+
callback (`Callable`, *optional*):
|
| 618 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 619 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 620 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 621 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 622 |
+
called at every step.
|
| 623 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 624 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 625 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 626 |
+
|
| 627 |
+
The keyword arguments to configure the edit are:
|
| 628 |
+
- edit_type (`str`). The edit type to apply. Can be either of `replace`, `refine`, `reweight`.
|
| 629 |
+
- n_cross_replace (`int`): Number of diffusion steps in which cross attention should be replaced
|
| 630 |
+
- n_self_replace (`int`): Number of diffusion steps in which self attention should be replaced
|
| 631 |
+
- local_blend_words(`List[str]`, *optional*, default to `None`): Determines which area should be
|
| 632 |
+
changed. If None, then the whole image can be changed.
|
| 633 |
+
- equalizer_words(`List[str]`, *optional*, default to `None`): Required for edit type `reweight`.
|
| 634 |
+
Determines which words should be enhanced.
|
| 635 |
+
- equalizer_strengths (`List[float]`, *optional*, default to `None`) Required for edit type `reweight`.
|
| 636 |
+
Determines which how much the words in `equalizer_words` should be enhanced.
|
| 637 |
+
|
| 638 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 639 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 640 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 641 |
+
using zero terminal SNR.
|
| 642 |
+
|
| 643 |
+
Returns:
|
| 644 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 645 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 646 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 647 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 648 |
+
(nsfw) content, according to the `safety_checker`.
|
| 649 |
+
"""
|
| 650 |
+
|
| 651 |
+
self.controller = create_controller(
|
| 652 |
+
prompt,
|
| 653 |
+
cross_attention_kwargs,
|
| 654 |
+
num_inference_steps,
|
| 655 |
+
tokenizer=self.tokenizer,
|
| 656 |
+
device=self.device,
|
| 657 |
+
)
|
| 658 |
+
self.register_attention_control(self.controller) # add attention controller
|
| 659 |
+
|
| 660 |
+
# 0. Default height and width to unet
|
| 661 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 662 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 663 |
+
|
| 664 |
+
# 1. Check inputs. Raise error if not correct
|
| 665 |
+
self.check_inputs(prompt, height, width, callback_steps)
|
| 666 |
+
|
| 667 |
+
# 2. Define call parameters
|
| 668 |
+
if prompt is not None and isinstance(prompt, str):
|
| 669 |
+
batch_size = 1
|
| 670 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 671 |
+
batch_size = len(prompt)
|
| 672 |
+
else:
|
| 673 |
+
batch_size = prompt_embeds.shape[0]
|
| 674 |
+
|
| 675 |
+
device = self._execution_device
|
| 676 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 677 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 678 |
+
# corresponds to doing no classifier free guidance.
|
| 679 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 680 |
+
|
| 681 |
+
# 3. Encode input prompt
|
| 682 |
+
text_encoder_lora_scale = (
|
| 683 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 684 |
+
)
|
| 685 |
+
prompt_embeds = self._encode_prompt(
|
| 686 |
+
prompt,
|
| 687 |
+
device,
|
| 688 |
+
num_images_per_prompt,
|
| 689 |
+
do_classifier_free_guidance,
|
| 690 |
+
negative_prompt,
|
| 691 |
+
prompt_embeds=prompt_embeds,
|
| 692 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 693 |
+
lora_scale=text_encoder_lora_scale,
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
# 4. Prepare timesteps
|
| 697 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 698 |
+
timesteps = self.scheduler.timesteps
|
| 699 |
+
|
| 700 |
+
# 5. Prepare latent variables
|
| 701 |
+
num_channels_latents = self.unet.config.in_channels
|
| 702 |
+
latents = self.prepare_latents(
|
| 703 |
+
batch_size * num_images_per_prompt,
|
| 704 |
+
num_channels_latents,
|
| 705 |
+
height,
|
| 706 |
+
width,
|
| 707 |
+
prompt_embeds.dtype,
|
| 708 |
+
device,
|
| 709 |
+
generator,
|
| 710 |
+
latents,
|
| 711 |
+
)
|
| 712 |
+
|
| 713 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 714 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 715 |
+
|
| 716 |
+
# 7. Denoising loop
|
| 717 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 718 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 719 |
+
for i, t in enumerate(timesteps):
|
| 720 |
+
# expand the latents if we are doing classifier free guidance
|
| 721 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 722 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 723 |
+
|
| 724 |
+
# predict the noise residual
|
| 725 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
|
| 726 |
+
|
| 727 |
+
# perform guidance
|
| 728 |
+
if do_classifier_free_guidance:
|
| 729 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 730 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 731 |
+
|
| 732 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 733 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 734 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
|
| 735 |
+
|
| 736 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 737 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 738 |
+
|
| 739 |
+
# step callback
|
| 740 |
+
latents = self.controller.step_callback(latents)
|
| 741 |
+
|
| 742 |
+
# call the callback, if provided
|
| 743 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 744 |
+
progress_bar.update()
|
| 745 |
+
if callback is not None and i % callback_steps == 0:
|
| 746 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 747 |
+
callback(step_idx, t, latents)
|
| 748 |
+
|
| 749 |
+
# 8. Post-processing
|
| 750 |
+
if not output_type == "latent":
|
| 751 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 752 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 753 |
+
else:
|
| 754 |
+
image = latents
|
| 755 |
+
has_nsfw_concept = None
|
| 756 |
+
|
| 757 |
+
# 9. Run safety checker
|
| 758 |
+
if has_nsfw_concept is None:
|
| 759 |
+
do_denormalize = [True] * image.shape[0]
|
| 760 |
+
else:
|
| 761 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 762 |
+
|
| 763 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 764 |
+
|
| 765 |
+
# Offload last model to CPU
|
| 766 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 767 |
+
self.final_offload_hook.offload()
|
| 768 |
+
|
| 769 |
+
if not return_dict:
|
| 770 |
+
return (image, has_nsfw_concept)
|
| 771 |
+
|
| 772 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 773 |
+
|
    def register_attention_control(self, controller):
        attn_procs = {}
        cross_att_count = 0
        for name in self.unet.attn_processors.keys():
            # NOTE: the bare expressions below (cross_attention_dim / block_out_channels lookups)
            # are kept from the original code; their values are discarded and only
            # `place_in_unet` is actually used to build the processor.
            (None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim)
            if name.startswith("mid_block"):
                self.unet.config.block_out_channels[-1]
                place_in_unet = "mid"
            elif name.startswith("up_blocks"):
                block_id = int(name[len("up_blocks.")])
                list(reversed(self.unet.config.block_out_channels))[block_id]
                place_in_unet = "up"
            elif name.startswith("down_blocks"):
                block_id = int(name[len("down_blocks.")])
                self.unet.config.block_out_channels[block_id]
                place_in_unet = "down"
            else:
                continue
            cross_att_count += 1
            attn_procs[name] = P2PCrossAttnProcessor(controller=controller, place_in_unet=place_in_unet)

        self.unet.set_attn_processor(attn_procs)
        controller.num_att_layers = cross_att_count
| 797 |
+
|
| 798 |
+
|
| 799 |
+
class P2PCrossAttnProcessor:
|
| 800 |
+
def __init__(self, controller, place_in_unet):
|
| 801 |
+
super().__init__()
|
| 802 |
+
self.controller = controller
|
| 803 |
+
self.place_in_unet = place_in_unet
|
| 804 |
+
|
| 805 |
+
def __call__(
|
| 806 |
+
self,
|
| 807 |
+
attn: Attention,
|
| 808 |
+
hidden_states,
|
| 809 |
+
encoder_hidden_states=None,
|
| 810 |
+
attention_mask=None,
|
| 811 |
+
):
|
| 812 |
+
batch_size, sequence_length, _ = hidden_states.shape
|
| 813 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 814 |
+
|
| 815 |
+
query = attn.to_q(hidden_states)
|
| 816 |
+
|
| 817 |
+
is_cross = encoder_hidden_states is not None
|
| 818 |
+
encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
|
| 819 |
+
key = attn.to_k(encoder_hidden_states)
|
| 820 |
+
value = attn.to_v(encoder_hidden_states)
|
| 821 |
+
|
| 822 |
+
query = attn.head_to_batch_dim(query)
|
| 823 |
+
key = attn.head_to_batch_dim(key)
|
| 824 |
+
value = attn.head_to_batch_dim(value)
|
| 825 |
+
|
| 826 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
| 827 |
+
|
| 828 |
+
# one line change
|
| 829 |
+
self.controller(attention_probs, is_cross, self.place_in_unet)
|
| 830 |
+
|
| 831 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 832 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 833 |
+
|
| 834 |
+
# linear proj
|
| 835 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 836 |
+
# dropout
|
| 837 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 838 |
+
|
| 839 |
+
return hidden_states
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
def create_controller(
|
| 843 |
+
prompts: List[str],
|
| 844 |
+
cross_attention_kwargs: Dict,
|
| 845 |
+
num_inference_steps: int,
|
| 846 |
+
tokenizer,
|
| 847 |
+
device,
|
| 848 |
+
) -> AttentionControl:
|
| 849 |
+
edit_type = cross_attention_kwargs.get("edit_type", None)
|
| 850 |
+
local_blend_words = cross_attention_kwargs.get("local_blend_words", None)
|
| 851 |
+
equalizer_words = cross_attention_kwargs.get("equalizer_words", None)
|
| 852 |
+
equalizer_strengths = cross_attention_kwargs.get("equalizer_strengths", None)
|
| 853 |
+
n_cross_replace = cross_attention_kwargs.get("n_cross_replace", 0.4)
|
| 854 |
+
n_self_replace = cross_attention_kwargs.get("n_self_replace", 0.4)
|
| 855 |
+
|
| 856 |
+
# only replace
|
| 857 |
+
if edit_type == "replace" and local_blend_words is None:
|
| 858 |
+
return AttentionReplace(
|
| 859 |
+
prompts,
|
| 860 |
+
num_inference_steps,
|
| 861 |
+
n_cross_replace,
|
| 862 |
+
n_self_replace,
|
| 863 |
+
tokenizer=tokenizer,
|
| 864 |
+
device=device,
|
| 865 |
+
)
|
| 866 |
+
|
| 867 |
+
# replace + localblend
|
| 868 |
+
if edit_type == "replace" and local_blend_words is not None:
|
| 869 |
+
lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
|
| 870 |
+
return AttentionReplace(
|
| 871 |
+
prompts,
|
| 872 |
+
num_inference_steps,
|
| 873 |
+
n_cross_replace,
|
| 874 |
+
n_self_replace,
|
| 875 |
+
lb,
|
| 876 |
+
tokenizer=tokenizer,
|
| 877 |
+
device=device,
|
| 878 |
+
)
|
| 879 |
+
|
| 880 |
+
# only refine
|
| 881 |
+
if edit_type == "refine" and local_blend_words is None:
|
| 882 |
+
return AttentionRefine(
|
| 883 |
+
prompts,
|
| 884 |
+
num_inference_steps,
|
| 885 |
+
n_cross_replace,
|
| 886 |
+
n_self_replace,
|
| 887 |
+
tokenizer=tokenizer,
|
| 888 |
+
device=device,
|
| 889 |
+
)
|
| 890 |
+
|
| 891 |
+
# refine + localblend
|
| 892 |
+
if edit_type == "refine" and local_blend_words is not None:
|
| 893 |
+
lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
|
| 894 |
+
return AttentionRefine(
|
| 895 |
+
prompts,
|
| 896 |
+
num_inference_steps,
|
| 897 |
+
n_cross_replace,
|
| 898 |
+
n_self_replace,
|
| 899 |
+
lb,
|
| 900 |
+
tokenizer=tokenizer,
|
| 901 |
+
device=device,
|
| 902 |
+
)
|
| 903 |
+
|
| 904 |
+
# reweight
|
| 905 |
+
if edit_type == "reweight":
|
| 906 |
+
assert (
|
| 907 |
+
equalizer_words is not None and equalizer_strengths is not None
|
| 908 |
+
), "To use reweight edit, please specify equalizer_words and equalizer_strengths."
|
| 909 |
+
assert len(equalizer_words) == len(
|
| 910 |
+
equalizer_strengths
|
| 911 |
+
), "equalizer_words and equalizer_strengths must be of same length."
|
| 912 |
+
equalizer = get_equalizer(prompts[1], equalizer_words, equalizer_strengths, tokenizer=tokenizer)
|
| 913 |
+
return AttentionReweight(
|
| 914 |
+
prompts,
|
| 915 |
+
num_inference_steps,
|
| 916 |
+
n_cross_replace,
|
| 917 |
+
n_self_replace,
|
| 918 |
+
tokenizer=tokenizer,
|
| 919 |
+
device=device,
|
| 920 |
+
equalizer=equalizer,
|
| 921 |
+
)
|
| 922 |
+
|
| 923 |
+
raise ValueError(f"Edit type {edit_type} not recognized. Use one of: replace, refine, reweight.")
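# Illustrative usage sketch (not part of the original file). Assumptions: this file is
# loadable as a community pipeline via custom_pipeline="pipeline_prompt2prompt", and the
# two prompts tokenize to the same length, which the "replace" controller above expects.
# The controller is configured entirely through `cross_attention_kwargs`.
def _prompt2prompt_usage_example():
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        custom_pipeline="pipeline_prompt2prompt",  # assumed registration name
        torch_dtype=torch.float16,
    ).to("cuda")
    prompts = ["a photo of a cat riding a bicycle", "a photo of a dog riding a bicycle"]
    images = pipe(
        prompts,
        cross_attention_kwargs={"edit_type": "replace", "n_cross_replace": 0.4, "n_self_replace": 0.4},
        num_inference_steps=50,
        guidance_scale=7.5,
    ).images
    return images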
|
| 924 |
+
|
| 925 |
+
|
| 926 |
+
class AttentionControl(abc.ABC):
|
| 927 |
+
def step_callback(self, x_t):
|
| 928 |
+
return x_t
|
| 929 |
+
|
| 930 |
+
def between_steps(self):
|
| 931 |
+
return
|
| 932 |
+
|
| 933 |
+
@property
|
| 934 |
+
def num_uncond_att_layers(self):
|
| 935 |
+
return 0
|
| 936 |
+
|
| 937 |
+
@abc.abstractmethod
|
| 938 |
+
def forward(self, attn, is_cross: bool, place_in_unet: str):
|
| 939 |
+
raise NotImplementedError
|
| 940 |
+
|
| 941 |
+
def __call__(self, attn, is_cross: bool, place_in_unet: str):
|
| 942 |
+
if self.cur_att_layer >= self.num_uncond_att_layers:
|
| 943 |
+
h = attn.shape[0]
|
| 944 |
+
attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet)
|
| 945 |
+
self.cur_att_layer += 1
|
| 946 |
+
if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
|
| 947 |
+
self.cur_att_layer = 0
|
| 948 |
+
self.cur_step += 1
|
| 949 |
+
self.between_steps()
|
| 950 |
+
return attn
|
| 951 |
+
|
| 952 |
+
def reset(self):
|
| 953 |
+
self.cur_step = 0
|
| 954 |
+
self.cur_att_layer = 0
|
| 955 |
+
|
| 956 |
+
def __init__(self):
|
| 957 |
+
self.cur_step = 0
|
| 958 |
+
self.num_att_layers = -1
|
| 959 |
+
self.cur_att_layer = 0
|
| 960 |
+
|
| 961 |
+
|
| 962 |
+
class EmptyControl(AttentionControl):
|
| 963 |
+
def forward(self, attn, is_cross: bool, place_in_unet: str):
|
| 964 |
+
return attn
|
| 965 |
+
|
| 966 |
+
|
| 967 |
+
class AttentionStore(AttentionControl):
|
| 968 |
+
@staticmethod
|
| 969 |
+
def get_empty_store():
|
| 970 |
+
return {
|
| 971 |
+
"down_cross": [],
|
| 972 |
+
"mid_cross": [],
|
| 973 |
+
"up_cross": [],
|
| 974 |
+
"down_self": [],
|
| 975 |
+
"mid_self": [],
|
| 976 |
+
"up_self": [],
|
| 977 |
+
}
|
| 978 |
+
|
| 979 |
+
def forward(self, attn, is_cross: bool, place_in_unet: str):
|
| 980 |
+
key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
|
| 981 |
+
if attn.shape[1] <= 32**2: # avoid memory overhead
|
| 982 |
+
self.step_store[key].append(attn)
|
| 983 |
+
return attn
|
| 984 |
+
|
| 985 |
+
def between_steps(self):
|
| 986 |
+
if len(self.attention_store) == 0:
|
| 987 |
+
self.attention_store = self.step_store
|
| 988 |
+
else:
|
| 989 |
+
for key in self.attention_store:
|
| 990 |
+
for i in range(len(self.attention_store[key])):
|
| 991 |
+
self.attention_store[key][i] += self.step_store[key][i]
|
| 992 |
+
self.step_store = self.get_empty_store()
|
| 993 |
+
|
| 994 |
+
def get_average_attention(self):
|
| 995 |
+
average_attention = {
|
| 996 |
+
key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store
|
| 997 |
+
}
|
| 998 |
+
return average_attention
|
| 999 |
+
|
| 1000 |
+
def reset(self):
|
| 1001 |
+
super(AttentionStore, self).reset()
|
| 1002 |
+
self.step_store = self.get_empty_store()
|
| 1003 |
+
self.attention_store = {}
|
| 1004 |
+
|
| 1005 |
+
def __init__(self):
|
| 1006 |
+
super(AttentionStore, self).__init__()
|
| 1007 |
+
self.step_store = self.get_empty_store()
|
| 1008 |
+
self.attention_store = {}
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
class LocalBlend:
|
| 1012 |
+
def __call__(self, x_t, attention_store):
|
| 1013 |
+
k = 1
|
| 1014 |
+
maps = attention_store["down_cross"][2:4] + attention_store["up_cross"][:3]
|
| 1015 |
+
maps = [item.reshape(self.alpha_layers.shape[0], -1, 1, 16, 16, self.max_num_words) for item in maps]
|
| 1016 |
+
maps = torch.cat(maps, dim=1)
|
| 1017 |
+
maps = (maps * self.alpha_layers).sum(-1).mean(1)
|
| 1018 |
+
mask = F.max_pool2d(maps, (k * 2 + 1, k * 2 + 1), (1, 1), padding=(k, k))
|
| 1019 |
+
mask = F.interpolate(mask, size=(x_t.shape[2:]))
|
| 1020 |
+
mask = mask / mask.max(2, keepdims=True)[0].max(3, keepdims=True)[0]
|
| 1021 |
+
mask = mask.gt(self.threshold)
|
| 1022 |
+
mask = (mask[:1] + mask[1:]).float()
|
| 1023 |
+
x_t = x_t[:1] + mask * (x_t - x_t[:1])
|
| 1024 |
+
return x_t
|
| 1025 |
+
|
| 1026 |
+
def __init__(
|
| 1027 |
+
self,
|
| 1028 |
+
prompts: List[str],
|
| 1029 |
+
words: [List[List[str]]],
|
| 1030 |
+
tokenizer,
|
| 1031 |
+
device,
|
| 1032 |
+
threshold=0.3,
|
| 1033 |
+
max_num_words=77,
|
| 1034 |
+
):
|
| 1035 |
+
self.max_num_words = 77
|
| 1036 |
+
|
| 1037 |
+
alpha_layers = torch.zeros(len(prompts), 1, 1, 1, 1, self.max_num_words)
|
| 1038 |
+
for i, (prompt, words_) in enumerate(zip(prompts, words)):
|
| 1039 |
+
if isinstance(words_, str):
|
| 1040 |
+
words_ = [words_]
|
| 1041 |
+
for word in words_:
|
| 1042 |
+
ind = get_word_inds(prompt, word, tokenizer)
|
| 1043 |
+
alpha_layers[i, :, :, :, :, ind] = 1
|
| 1044 |
+
self.alpha_layers = alpha_layers.to(device)
|
| 1045 |
+
self.threshold = threshold
|
| 1046 |
+
|
| 1047 |
+
|
| 1048 |
+
class AttentionControlEdit(AttentionStore, abc.ABC):
|
| 1049 |
+
def step_callback(self, x_t):
|
| 1050 |
+
if self.local_blend is not None:
|
| 1051 |
+
x_t = self.local_blend(x_t, self.attention_store)
|
| 1052 |
+
return x_t
|
| 1053 |
+
|
| 1054 |
+
def replace_self_attention(self, attn_base, att_replace):
|
| 1055 |
+
if att_replace.shape[2] <= 16**2:
|
| 1056 |
+
return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
|
| 1057 |
+
else:
|
| 1058 |
+
return att_replace
|
| 1059 |
+
|
| 1060 |
+
@abc.abstractmethod
|
| 1061 |
+
def replace_cross_attention(self, attn_base, att_replace):
|
| 1062 |
+
raise NotImplementedError
|
| 1063 |
+
|
| 1064 |
+
def forward(self, attn, is_cross: bool, place_in_unet: str):
|
| 1065 |
+
super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
|
| 1066 |
+
# FIXME not replace correctly
|
| 1067 |
+
if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):
|
| 1068 |
+
h = attn.shape[0] // (self.batch_size)
|
| 1069 |
+
attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
|
| 1070 |
+
attn_base, attn_repalce = attn[0], attn[1:]
|
| 1071 |
+
if is_cross:
|
| 1072 |
+
alpha_words = self.cross_replace_alpha[self.cur_step]
|
| 1073 |
+
attn_repalce_new = (
|
| 1074 |
+
self.replace_cross_attention(attn_base, attn_repalce) * alpha_words
|
| 1075 |
+
+ (1 - alpha_words) * attn_repalce
|
| 1076 |
+
)
|
| 1077 |
+
attn[1:] = attn_repalce_new
|
| 1078 |
+
else:
|
| 1079 |
+
attn[1:] = self.replace_self_attention(attn_base, attn_repalce)
|
| 1080 |
+
attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
|
| 1081 |
+
return attn
|
| 1082 |
+
|
| 1083 |
+
def __init__(
|
| 1084 |
+
self,
|
| 1085 |
+
prompts,
|
| 1086 |
+
num_steps: int,
|
| 1087 |
+
cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
|
| 1088 |
+
self_replace_steps: Union[float, Tuple[float, float]],
|
| 1089 |
+
local_blend: Optional[LocalBlend],
|
| 1090 |
+
tokenizer,
|
| 1091 |
+
device,
|
| 1092 |
+
):
|
| 1093 |
+
super(AttentionControlEdit, self).__init__()
|
| 1094 |
+
# add tokenizer and device here
|
| 1095 |
+
|
| 1096 |
+
self.tokenizer = tokenizer
|
| 1097 |
+
self.device = device
|
| 1098 |
+
|
| 1099 |
+
self.batch_size = len(prompts)
|
| 1100 |
+
self.cross_replace_alpha = get_time_words_attention_alpha(
|
| 1101 |
+
prompts, num_steps, cross_replace_steps, self.tokenizer
|
| 1102 |
+
).to(self.device)
|
| 1103 |
+
if isinstance(self_replace_steps, float):
|
| 1104 |
+
self_replace_steps = 0, self_replace_steps
|
| 1105 |
+
self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
|
| 1106 |
+
self.local_blend = local_blend # 在外面定义后传进来
|
| 1107 |
+
|
| 1108 |
+
|
| 1109 |
+
class AttentionReplace(AttentionControlEdit):
|
| 1110 |
+
def replace_cross_attention(self, attn_base, att_replace):
|
| 1111 |
+
return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper)
|
| 1112 |
+
|
| 1113 |
+
def __init__(
|
| 1114 |
+
self,
|
| 1115 |
+
prompts,
|
| 1116 |
+
num_steps: int,
|
| 1117 |
+
cross_replace_steps: float,
|
| 1118 |
+
self_replace_steps: float,
|
| 1119 |
+
local_blend: Optional[LocalBlend] = None,
|
| 1120 |
+
tokenizer=None,
|
| 1121 |
+
device=None,
|
| 1122 |
+
):
|
| 1123 |
+
super(AttentionReplace, self).__init__(
|
| 1124 |
+
prompts,
|
| 1125 |
+
num_steps,
|
| 1126 |
+
cross_replace_steps,
|
| 1127 |
+
self_replace_steps,
|
| 1128 |
+
local_blend,
|
| 1129 |
+
tokenizer,
|
| 1130 |
+
device,
|
| 1131 |
+
)
|
| 1132 |
+
self.mapper = get_replacement_mapper(prompts, self.tokenizer).to(self.device)
|
| 1133 |
+
|
| 1134 |
+
|
| 1135 |
+
class AttentionRefine(AttentionControlEdit):
|
| 1136 |
+
def replace_cross_attention(self, attn_base, att_replace):
|
| 1137 |
+
attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3)
|
| 1138 |
+
attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
|
| 1139 |
+
return attn_replace
|
| 1140 |
+
|
| 1141 |
+
def __init__(
|
| 1142 |
+
self,
|
| 1143 |
+
prompts,
|
| 1144 |
+
num_steps: int,
|
| 1145 |
+
cross_replace_steps: float,
|
| 1146 |
+
self_replace_steps: float,
|
| 1147 |
+
local_blend: Optional[LocalBlend] = None,
|
| 1148 |
+
tokenizer=None,
|
| 1149 |
+
device=None,
|
| 1150 |
+
):
|
| 1151 |
+
super(AttentionRefine, self).__init__(
|
| 1152 |
+
prompts,
|
| 1153 |
+
num_steps,
|
| 1154 |
+
cross_replace_steps,
|
| 1155 |
+
self_replace_steps,
|
| 1156 |
+
local_blend,
|
| 1157 |
+
tokenizer,
|
| 1158 |
+
device,
|
| 1159 |
+
)
|
| 1160 |
+
self.mapper, alphas = get_refinement_mapper(prompts, self.tokenizer)
|
| 1161 |
+
self.mapper, alphas = self.mapper.to(self.device), alphas.to(self.device)
|
| 1162 |
+
self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])
|
| 1163 |
+
|
| 1164 |
+
|
| 1165 |
+
class AttentionReweight(AttentionControlEdit):
|
| 1166 |
+
def replace_cross_attention(self, attn_base, att_replace):
|
| 1167 |
+
if self.prev_controller is not None:
|
| 1168 |
+
attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
|
| 1169 |
+
attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
|
| 1170 |
+
return attn_replace
|
| 1171 |
+
|
| 1172 |
+
def __init__(
|
| 1173 |
+
self,
|
| 1174 |
+
prompts,
|
| 1175 |
+
num_steps: int,
|
| 1176 |
+
cross_replace_steps: float,
|
| 1177 |
+
self_replace_steps: float,
|
| 1178 |
+
equalizer,
|
| 1179 |
+
local_blend: Optional[LocalBlend] = None,
|
| 1180 |
+
controller: Optional[AttentionControlEdit] = None,
|
| 1181 |
+
tokenizer=None,
|
| 1182 |
+
device=None,
|
| 1183 |
+
):
|
| 1184 |
+
super(AttentionReweight, self).__init__(
|
| 1185 |
+
prompts,
|
| 1186 |
+
num_steps,
|
| 1187 |
+
cross_replace_steps,
|
| 1188 |
+
self_replace_steps,
|
| 1189 |
+
local_blend,
|
| 1190 |
+
tokenizer,
|
| 1191 |
+
device,
|
| 1192 |
+
)
|
| 1193 |
+
self.equalizer = equalizer.to(self.device)
|
| 1194 |
+
self.prev_controller = controller
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
### util functions for all Edits
|
| 1198 |
+
def update_alpha_time_word(
|
| 1199 |
+
alpha,
|
| 1200 |
+
bounds: Union[float, Tuple[float, float]],
|
| 1201 |
+
prompt_ind: int,
|
| 1202 |
+
word_inds: Optional[torch.Tensor] = None,
|
| 1203 |
+
):
|
| 1204 |
+
if isinstance(bounds, float):
|
| 1205 |
+
bounds = 0, bounds
|
| 1206 |
+
start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0])
|
| 1207 |
+
if word_inds is None:
|
| 1208 |
+
word_inds = torch.arange(alpha.shape[2])
|
| 1209 |
+
alpha[:start, prompt_ind, word_inds] = 0
|
| 1210 |
+
alpha[start:end, prompt_ind, word_inds] = 1
|
| 1211 |
+
alpha[end:, prompt_ind, word_inds] = 0
|
| 1212 |
+
return alpha
|
| 1213 |
+
|
| 1214 |
+
|
| 1215 |
+
def get_time_words_attention_alpha(
|
| 1216 |
+
prompts,
|
| 1217 |
+
num_steps,
|
| 1218 |
+
cross_replace_steps: Union[float, Dict[str, Tuple[float, float]]],
|
| 1219 |
+
tokenizer,
|
| 1220 |
+
max_num_words=77,
|
| 1221 |
+
):
|
| 1222 |
+
if not isinstance(cross_replace_steps, dict):
|
| 1223 |
+
cross_replace_steps = {"default_": cross_replace_steps}
|
| 1224 |
+
if "default_" not in cross_replace_steps:
|
| 1225 |
+
cross_replace_steps["default_"] = (0.0, 1.0)
|
| 1226 |
+
alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words)
|
| 1227 |
+
for i in range(len(prompts) - 1):
|
| 1228 |
+
alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"], i)
|
| 1229 |
+
for key, item in cross_replace_steps.items():
|
| 1230 |
+
if key != "default_":
|
| 1231 |
+
inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))]
|
| 1232 |
+
for i, ind in enumerate(inds):
|
| 1233 |
+
if len(ind) > 0:
|
| 1234 |
+
alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind)
|
| 1235 |
+
alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words)
|
| 1236 |
+
return alpha_time_words
|
| 1237 |
+
|
| 1238 |
+
|
| 1239 |
+
### util functions for LocalBlend and ReplacementEdit
|
| 1240 |
+
def get_word_inds(text: str, word_place: int, tokenizer):
|
| 1241 |
+
split_text = text.split(" ")
|
| 1242 |
+
if isinstance(word_place, str):
|
| 1243 |
+
word_place = [i for i, word in enumerate(split_text) if word_place == word]
|
| 1244 |
+
elif isinstance(word_place, int):
|
| 1245 |
+
word_place = [word_place]
|
| 1246 |
+
out = []
|
| 1247 |
+
if len(word_place) > 0:
|
| 1248 |
+
words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1]
|
| 1249 |
+
cur_len, ptr = 0, 0
|
| 1250 |
+
|
| 1251 |
+
for i in range(len(words_encode)):
|
| 1252 |
+
cur_len += len(words_encode[i])
|
| 1253 |
+
if ptr in word_place:
|
| 1254 |
+
out.append(i + 1)
|
| 1255 |
+
if cur_len >= len(split_text[ptr]):
|
| 1256 |
+
ptr += 1
|
| 1257 |
+
cur_len = 0
|
| 1258 |
+
return np.array(out)
|
| 1259 |
+
|
| 1260 |
+
|
| 1261 |
+
### util functions for ReplacementEdit
|
| 1262 |
+
def get_replacement_mapper_(x: str, y: str, tokenizer, max_len=77):
|
| 1263 |
+
words_x = x.split(" ")
|
| 1264 |
+
words_y = y.split(" ")
|
| 1265 |
+
if len(words_x) != len(words_y):
|
| 1266 |
+
raise ValueError(
|
| 1267 |
+
f"attention replacement edit can only be applied on prompts with the same length"
|
| 1268 |
+
f" but prompt A has {len(words_x)} words and prompt B has {len(words_y)} words."
|
| 1269 |
+
)
|
| 1270 |
+
inds_replace = [i for i in range(len(words_y)) if words_y[i] != words_x[i]]
|
| 1271 |
+
inds_source = [get_word_inds(x, i, tokenizer) for i in inds_replace]
|
| 1272 |
+
inds_target = [get_word_inds(y, i, tokenizer) for i in inds_replace]
|
| 1273 |
+
mapper = np.zeros((max_len, max_len))
|
| 1274 |
+
i = j = 0
|
| 1275 |
+
cur_inds = 0
|
| 1276 |
+
while i < max_len and j < max_len:
|
| 1277 |
+
if cur_inds < len(inds_source) and inds_source[cur_inds][0] == i:
|
| 1278 |
+
inds_source_, inds_target_ = inds_source[cur_inds], inds_target[cur_inds]
|
| 1279 |
+
if len(inds_source_) == len(inds_target_):
|
| 1280 |
+
mapper[inds_source_, inds_target_] = 1
|
| 1281 |
+
else:
|
| 1282 |
+
ratio = 1 / len(inds_target_)
|
| 1283 |
+
for i_t in inds_target_:
|
| 1284 |
+
mapper[inds_source_, i_t] = ratio
|
| 1285 |
+
cur_inds += 1
|
| 1286 |
+
i += len(inds_source_)
|
| 1287 |
+
j += len(inds_target_)
|
| 1288 |
+
elif cur_inds < len(inds_source):
|
| 1289 |
+
mapper[i, j] = 1
|
| 1290 |
+
i += 1
|
| 1291 |
+
j += 1
|
| 1292 |
+
else:
|
| 1293 |
+
mapper[j, j] = 1
|
| 1294 |
+
i += 1
|
| 1295 |
+
j += 1
|
| 1296 |
+
|
| 1297 |
+
return torch.from_numpy(mapper).float()
|
| 1298 |
+
|
| 1299 |
+
|
| 1300 |
+
def get_replacement_mapper(prompts, tokenizer, max_len=77):
|
| 1301 |
+
x_seq = prompts[0]
|
| 1302 |
+
mappers = []
|
| 1303 |
+
for i in range(1, len(prompts)):
|
| 1304 |
+
mapper = get_replacement_mapper_(x_seq, prompts[i], tokenizer, max_len)
|
| 1305 |
+
mappers.append(mapper)
|
| 1306 |
+
return torch.stack(mappers)
|
| 1307 |
+
|
| 1308 |
+
|
| 1309 |
+
### util functions for ReweightEdit
|
| 1310 |
+
def get_equalizer(
|
| 1311 |
+
text: str,
|
| 1312 |
+
word_select: Union[int, Tuple[int, ...]],
|
| 1313 |
+
values: Union[List[float], Tuple[float, ...]],
|
| 1314 |
+
tokenizer,
|
| 1315 |
+
):
|
| 1316 |
+
if isinstance(word_select, (int, str)):
|
| 1317 |
+
word_select = (word_select,)
|
| 1318 |
+
equalizer = torch.ones(len(values), 77)
|
| 1319 |
+
values = torch.tensor(values, dtype=torch.float32)
|
| 1320 |
+
for word in word_select:
|
| 1321 |
+
inds = get_word_inds(text, word, tokenizer)
|
| 1322 |
+
equalizer[:, inds] = values
|
| 1323 |
+
return equalizer
|
| 1324 |
+
|
| 1325 |
+
|
| 1326 |
+
### util functions for RefinementEdit
|
| 1327 |
+
class ScoreParams:
|
| 1328 |
+
def __init__(self, gap, match, mismatch):
|
| 1329 |
+
self.gap = gap
|
| 1330 |
+
self.match = match
|
| 1331 |
+
self.mismatch = mismatch
|
| 1332 |
+
|
| 1333 |
+
def mis_match_char(self, x, y):
|
| 1334 |
+
if x != y:
|
| 1335 |
+
return self.mismatch
|
| 1336 |
+
else:
|
| 1337 |
+
return self.match
|
| 1338 |
+
|
| 1339 |
+
|
| 1340 |
+
def get_matrix(size_x, size_y, gap):
|
| 1341 |
+
matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
|
| 1342 |
+
matrix[0, 1:] = (np.arange(size_y) + 1) * gap
|
| 1343 |
+
matrix[1:, 0] = (np.arange(size_x) + 1) * gap
|
| 1344 |
+
return matrix
|
| 1345 |
+
|
| 1346 |
+
|
| 1347 |
+
def get_traceback_matrix(size_x, size_y):
|
| 1348 |
+
matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
|
| 1349 |
+
matrix[0, 1:] = 1
|
| 1350 |
+
matrix[1:, 0] = 2
|
| 1351 |
+
matrix[0, 0] = 4
|
| 1352 |
+
return matrix
|
| 1353 |
+
|
| 1354 |
+
|
| 1355 |
+
def global_align(x, y, score):
|
| 1356 |
+
matrix = get_matrix(len(x), len(y), score.gap)
|
| 1357 |
+
trace_back = get_traceback_matrix(len(x), len(y))
|
| 1358 |
+
for i in range(1, len(x) + 1):
|
| 1359 |
+
for j in range(1, len(y) + 1):
|
| 1360 |
+
left = matrix[i, j - 1] + score.gap
|
| 1361 |
+
up = matrix[i - 1, j] + score.gap
|
| 1362 |
+
diag = matrix[i - 1, j - 1] + score.mis_match_char(x[i - 1], y[j - 1])
|
| 1363 |
+
matrix[i, j] = max(left, up, diag)
|
| 1364 |
+
if matrix[i, j] == left:
|
| 1365 |
+
trace_back[i, j] = 1
|
| 1366 |
+
elif matrix[i, j] == up:
|
| 1367 |
+
trace_back[i, j] = 2
|
| 1368 |
+
else:
|
| 1369 |
+
trace_back[i, j] = 3
|
| 1370 |
+
return matrix, trace_back
|
| 1371 |
+
|
| 1372 |
+
|
| 1373 |
+
def get_aligned_sequences(x, y, trace_back):
|
| 1374 |
+
x_seq = []
|
| 1375 |
+
y_seq = []
|
| 1376 |
+
i = len(x)
|
| 1377 |
+
j = len(y)
|
| 1378 |
+
mapper_y_to_x = []
|
| 1379 |
+
while i > 0 or j > 0:
|
| 1380 |
+
if trace_back[i, j] == 3:
|
| 1381 |
+
x_seq.append(x[i - 1])
|
| 1382 |
+
y_seq.append(y[j - 1])
|
| 1383 |
+
i = i - 1
|
| 1384 |
+
j = j - 1
|
| 1385 |
+
mapper_y_to_x.append((j, i))
|
| 1386 |
+
elif trace_back[i][j] == 1:
|
| 1387 |
+
x_seq.append("-")
|
| 1388 |
+
y_seq.append(y[j - 1])
|
| 1389 |
+
j = j - 1
|
| 1390 |
+
mapper_y_to_x.append((j, -1))
|
| 1391 |
+
elif trace_back[i][j] == 2:
|
| 1392 |
+
x_seq.append(x[i - 1])
|
| 1393 |
+
y_seq.append("-")
|
| 1394 |
+
i = i - 1
|
| 1395 |
+
elif trace_back[i][j] == 4:
|
| 1396 |
+
break
|
| 1397 |
+
mapper_y_to_x.reverse()
|
| 1398 |
+
return x_seq, y_seq, torch.tensor(mapper_y_to_x, dtype=torch.int64)
|
| 1399 |
+
|
| 1400 |
+
|
| 1401 |
+
def get_mapper(x: str, y: str, tokenizer, max_len=77):
|
| 1402 |
+
x_seq = tokenizer.encode(x)
|
| 1403 |
+
y_seq = tokenizer.encode(y)
|
| 1404 |
+
score = ScoreParams(0, 1, -1)
|
| 1405 |
+
matrix, trace_back = global_align(x_seq, y_seq, score)
|
| 1406 |
+
mapper_base = get_aligned_sequences(x_seq, y_seq, trace_back)[-1]
|
| 1407 |
+
alphas = torch.ones(max_len)
|
| 1408 |
+
alphas[: mapper_base.shape[0]] = mapper_base[:, 1].ne(-1).float()
|
| 1409 |
+
mapper = torch.zeros(max_len, dtype=torch.int64)
|
| 1410 |
+
mapper[: mapper_base.shape[0]] = mapper_base[:, 1]
|
| 1411 |
+
mapper[mapper_base.shape[0] :] = len(y_seq) + torch.arange(max_len - len(y_seq))
|
| 1412 |
+
return mapper, alphas
|
| 1413 |
+
|
| 1414 |
+
|
| 1415 |
+
def get_refinement_mapper(prompts, tokenizer, max_len=77):
|
| 1416 |
+
x_seq = prompts[0]
|
| 1417 |
+
mappers, alphas = [], []
|
| 1418 |
+
for i in range(1, len(prompts)):
|
| 1419 |
+
mapper, alpha = get_mapper(x_seq, prompts[i], tokenizer, max_len)
|
| 1420 |
+
mappers.append(mapper)
|
| 1421 |
+
alphas.append(alpha)
|
| 1422 |
+
return torch.stack(mappers), torch.stack(alphas)
|
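The classes above are normally constructed for you by the pipeline, but a minimal standalone sketch can make their interfaces easier to follow. The snippet below is illustrative only and not part of the shipped file: it assumes the `openai/clip-vit-large-patch14` tokenizer checkpoint and a CPU device, and it builds a "replace" controller and a "reweight" controller directly from the classes defined above. In real use the pipeline drives the controller by calling it for every attention layer at every denoising step.

# Illustrative usage sketch (assumptions: CLIP checkpoint name, CPU device, 50 steps).
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
# Word-for-word edit, so both prompts must have the same number of words.
prompts = ["a cat sitting on a car", "a dog sitting on a car"]

# "replace" edit: inject the source prompt's cross-attention maps for the edited words
# during the first 80% of cross-attention steps and 40% of self-attention steps.
replace_controller = AttentionReplace(
    prompts,
    num_steps=50,
    cross_replace_steps=0.8,
    self_replace_steps=0.4,
    tokenizer=tokenizer,
    device="cpu",
)

# "reweight" edit: scale how much attention the word "dog" receives in the second prompt.
equalizer = get_equalizer(prompts[1], word_select=("dog",), values=(2.0,), tokenizer=tokenizer)
reweight_controller = AttentionReweight(
    prompts,
    num_steps=50,
    cross_replace_steps=0.8,
    self_replace_steps=0.4,
    equalizer=equalizer,
    tokenizer=tokenizer,
    device="cpu",
)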
v0.27.0/pipeline_sdxl_style_aligned.py
ADDED
@@ -0,0 +1,1906 @@
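The pipeline listed below keeps a batch of generations stylistically consistent by sharing attention keys and values with a reference image and re-normalizing queries/keys with AdaIN toward the reference statistics. As a rough orientation before the listing, here is a small self-contained sketch (not from the file) of that AdaIN step on plain tensors; it simplifies by always using the first batch element as the reference, whereas the shipped helpers (`adain`, `expand_first`) pick a reference per classifier-free-guidance half.

# Toy AdaIN sketch (illustrative only; simplified relative to the helpers in the file below).
import torch

def toy_adain(feat: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # Normalize each batch element over the token dimension, then re-inject the
    # mean/std of the first ("reference") element so all elements share its statistics.
    mean = feat.mean(dim=-2, keepdim=True)
    std = (feat.var(dim=-2, keepdim=True) + eps).sqrt()
    ref_mean, ref_std = mean[:1], std[:1]
    return (feat - mean) / std * ref_std + ref_mean

queries = torch.randn(4, 77, 64)  # (batch, tokens, channels)
aligned = toy_adain(queries)
# After AdaIN, every batch element has the per-channel mean/std of the reference element.
print(torch.allclose(aligned.mean(dim=-2), aligned.mean(dim=-2)[:1], atol=1e-5))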
|
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Based on [Style Aligned Image Generation via Shared Attention](https://arxiv.org/abs/2312.02133).
# Authors: Amir Hertz, Andrey Voynov, Shlomi Fruchter, Daniel Cohen-Or
# Project Page: https://style-aligned-gen.github.io/
# Code: https://github.com/google/style-aligned
#
# Adapted to Diffusers by [Aryan V S](https://github.com/a-r-r-o-w/).

import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from diffusers.models.attention_processor import (
    Attention,
    AttnProcessor2_0,
    FusedAttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    USE_PEFT_BACKEND,
    deprecate,
    is_invisible_watermark_available,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from typing import List

        >>> import torch
        >>> from diffusers.pipelines.pipeline_utils import DiffusionPipeline
        >>> from PIL import Image

        >>> model_id = "a-r-r-o-w/dreamshaper-xl-turbo"
        >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", custom_pipeline="pipeline_sdxl_style_aligned")
        >>> pipe = pipe.to("cuda")

        >>> # Enable memory saving techniques
        >>> pipe.enable_vae_slicing()
        >>> pipe.enable_vae_tiling()

        >>> prompt = [
        ...     "a toy train. macro photo. 3d game asset",
        ...     "a toy airplane. macro photo. 3d game asset",
        ...     "a toy bicycle. macro photo. 3d game asset",
        ...     "a toy car. macro photo. 3d game asset",
        ... ]
        >>> negative_prompt = "low quality, worst quality, "

        >>> # Enable StyleAligned
        >>> pipe.enable_style_aligned(
        ...     share_group_norm=False,
        ...     share_layer_norm=False,
        ...     share_attention=True,
        ...     adain_queries=True,
        ...     adain_keys=True,
        ...     adain_values=False,
        ...     full_attention_share=False,
        ...     shared_score_scale=1.0,
        ...     shared_score_shift=0.0,
        ...     only_self_level=0.0,
        >>> )

        >>> # Run inference
        >>> images = pipe(
        ...     prompt=prompt,
        ...     negative_prompt=negative_prompt,
        ...     guidance_scale=2,
        ...     height=1024,
        ...     width=1024,
        ...     num_inference_steps=10,
        ...     generator=torch.Generator().manual_seed(42),
        >>> ).images

        >>> # Disable StyleAligned if you do not wish to use it anymore
        >>> pipe.disable_style_aligned()
        ```
"""


def expand_first(feat: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    b = feat.shape[0]
    feat_style = torch.stack((feat[0], feat[b // 2])).unsqueeze(1)
    if scale == 1:
        feat_style = feat_style.expand(2, b // 2, *feat.shape[1:])
    else:
        feat_style = feat_style.repeat(1, b // 2, 1, 1, 1)
        feat_style = torch.cat([feat_style[:, :1], scale * feat_style[:, 1:]], dim=1)
    return feat_style.reshape(*feat.shape)


def concat_first(feat: torch.Tensor, dim: int = 2, scale: float = 1.0) -> torch.Tensor:
    feat_style = expand_first(feat, scale=scale)
    return torch.cat((feat, feat_style), dim=dim)


def calc_mean_std(feat: torch.Tensor, eps: float = 1e-5) -> tuple[torch.Tensor, torch.Tensor]:
    feat_std = (feat.var(dim=-2, keepdims=True) + eps).sqrt()
    feat_mean = feat.mean(dim=-2, keepdims=True)
    return feat_mean, feat_std


def adain(feat: torch.Tensor) -> torch.Tensor:
    feat_mean, feat_std = calc_mean_std(feat)
    feat_style_mean = expand_first(feat_mean)
    feat_style_std = expand_first(feat_std)
    feat = (feat - feat_mean) / feat_std
    feat = feat * feat_style_std + feat_style_mean
    return feat


def get_switch_vec(total_num_layers, level):
    if level == 0:
        return torch.zeros(total_num_layers, dtype=torch.bool)
    if level == 1:
        return torch.ones(total_num_layers, dtype=torch.bool)
    to_flip = level > 0.5
    if to_flip:
        level = 1 - level
    num_switch = int(level * total_num_layers)
    vec = torch.arange(total_num_layers)
    vec = vec % (total_num_layers // num_switch)
    vec = vec == 0
    if to_flip:
        vec = ~vec
    return vec


class SharedAttentionProcessor(AttnProcessor2_0):
    def __init__(
        self,
        share_attention: bool = True,
        adain_queries: bool = True,
        adain_keys: bool = True,
        adain_values: bool = False,
        full_attention_share: bool = False,
        shared_score_scale: float = 1.0,
        shared_score_shift: float = 0.0,
    ):
        r"""Shared Attention Processor as proposed in the StyleAligned paper."""
        super().__init__()
        self.share_attention = share_attention
        self.adain_queries = adain_queries
        self.adain_keys = adain_keys
        self.adain_values = adain_values
        self.full_attention_share = full_attention_share
        self.shared_score_scale = shared_score_scale
        self.shared_score_shift = shared_score_shift

    def shifted_scaled_dot_product_attention(
        self, attn: Attention, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    ) -> torch.Tensor:
        logits = torch.einsum("bhqd,bhkd->bhqk", query, key) * attn.scale
        logits[:, :, :, query.shape[2] :] += self.shared_score_shift
        probs = logits.softmax(-1)
        return torch.einsum("bhqk,bhkd->bhqd", probs, value)

    def shared_call(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        residual = hidden_states
        input_ndim = hidden_states.ndim
        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)
        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if self.adain_queries:
            query = adain(query)
        if self.adain_keys:
            key = adain(key)
        if self.adain_values:
            value = adain(value)
        if self.share_attention:
            key = concat_first(key, -2, scale=self.shared_score_scale)
            value = concat_first(value, -2)
            if self.shared_score_shift != 0:
                hidden_states = self.shifted_scaled_dot_product_attention(attn, query, key, value)
            else:
                hidden_states = F.scaled_dot_product_attention(
                    query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
                )
        else:
            hidden_states = F.scaled_dot_product_attention(
                query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
            )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor
        return hidden_states

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        if self.full_attention_share:
            b, n, d = hidden_states.shape
            k = 2
            hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d)
            # hidden_states = einops.rearrange(hidden_states, "(k b) n d -> k (b n) d", k=2)
            hidden_states = super().__call__(
                attn,
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                **kwargs,
            )
            hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d)
            # hidden_states = einops.rearrange(hidden_states, "k (b n) d -> (k b) n d", n=n)
        else:
            hidden_states = self.shared_call(attn, hidden_states, hidden_states, attention_mask, **kwargs)

        return hidden_states


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used,
            `timesteps` must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
            must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


class StyleAlignedSDXLPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    FromSingleFileMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
    IPAdapterMixin,
):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion XL.

    This pipeline also adds experimental support for [StyleAligned](https://arxiv.org/abs/2312.02133). It can
    be enabled/disabled using `.enable_style_aligned()` or `.disable_style_aligned()` respectively.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion XL uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([`CLIPTextModelWithProjection`]):
            Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
            specifically the
            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
            variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer_2 (`CLIPTokenizer`):
            Second Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
            `stabilityai/stable-diffusion-xl-base-1-0`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
            watermark output images. If not defined, it will default to True if the package is installed, otherwise no
            watermarker will be used.
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
    _optional_components = [
        "tokenizer",
        "tokenizer_2",
        "text_encoder",
        "text_encoder_2",
        "image_encoder",
        "feature_extractor",
    ]
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
        "add_text_embeds",
        "add_time_ids",
        "negative_pooled_prompt_embeds",
        "negative_add_time_ids",
    ]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        image_encoder: CLIPVisionModelWithProjection = None,
        feature_extractor: CLIPImageProcessor = None,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            scheduler=scheduler,
            image_encoder=image_encoder,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
        )

        self.default_sample_size = self.unet.config.sample_size

        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None

    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if self.text_encoder is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder, lora_scale)

            if self.text_encoder_2 is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder_2, lora_scale)

        prompt = [prompt] if isinstance(prompt, str) else prompt

        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
|
| 598 |
+
|
| 599 |
+
if prompt_embeds is None:
|
| 600 |
+
prompt_2 = prompt_2 or prompt
|
| 601 |
+
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
|
| 602 |
+
|
| 603 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 604 |
+
prompt_embeds_list = []
|
| 605 |
+
prompts = [prompt, prompt_2]
|
| 606 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 607 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 608 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 609 |
+
|
| 610 |
+
text_inputs = tokenizer(
|
| 611 |
+
prompt,
|
| 612 |
+
padding="max_length",
|
| 613 |
+
max_length=tokenizer.model_max_length,
|
| 614 |
+
truncation=True,
|
| 615 |
+
return_tensors="pt",
|
| 616 |
+
)
|
| 617 |
+
|
| 618 |
+
text_input_ids = text_inputs.input_ids
|
| 619 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 620 |
+
|
| 621 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 622 |
+
text_input_ids, untruncated_ids
|
| 623 |
+
):
|
| 624 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 625 |
+
logger.warning(
|
| 626 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 627 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 628 |
+
)
|
| 629 |
+
|
| 630 |
+
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
|
| 631 |
+
|
| 632 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 633 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 634 |
+
if clip_skip is None:
|
| 635 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 636 |
+
else:
|
| 637 |
+
# "2" because SDXL always indexes from the penultimate layer.
|
| 638 |
+
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
|
| 639 |
+
|
| 640 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 641 |
+
|
| 642 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 643 |
+
|
| 644 |
+
# get unconditional embeddings for classifier free guidance
|
| 645 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 646 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 647 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 648 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 649 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 650 |
+
negative_prompt = negative_prompt or ""
|
| 651 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 652 |
+
|
| 653 |
+
# normalize str to list
|
| 654 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 655 |
+
negative_prompt_2 = (
|
| 656 |
+
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
|
| 657 |
+
)
|
| 658 |
+
|
| 659 |
+
uncond_tokens: List[str]
|
| 660 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 661 |
+
raise TypeError(
|
| 662 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 663 |
+
f" {type(prompt)}."
|
| 664 |
+
)
|
| 665 |
+
elif batch_size != len(negative_prompt):
|
| 666 |
+
raise ValueError(
|
| 667 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 668 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 669 |
+
" the batch size of `prompt`."
|
| 670 |
+
)
|
| 671 |
+
else:
|
| 672 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 673 |
+
|
| 674 |
+
negative_prompt_embeds_list = []
|
| 675 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 676 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 677 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 678 |
+
|
| 679 |
+
max_length = prompt_embeds.shape[1]
|
| 680 |
+
uncond_input = tokenizer(
|
| 681 |
+
negative_prompt,
|
| 682 |
+
padding="max_length",
|
| 683 |
+
max_length=max_length,
|
| 684 |
+
truncation=True,
|
| 685 |
+
return_tensors="pt",
|
| 686 |
+
)
|
| 687 |
+
|
| 688 |
+
negative_prompt_embeds = text_encoder(
|
| 689 |
+
uncond_input.input_ids.to(device),
|
| 690 |
+
output_hidden_states=True,
|
| 691 |
+
)
|
| 692 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 693 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 694 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 695 |
+
|
| 696 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 697 |
+
|
| 698 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 699 |
+
|
| 700 |
+
if self.text_encoder_2 is not None:
|
| 701 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 702 |
+
else:
|
| 703 |
+
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 704 |
+
|
| 705 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 706 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 707 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 708 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 709 |
+
|
| 710 |
+
if do_classifier_free_guidance:
|
| 711 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 712 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 713 |
+
|
| 714 |
+
if self.text_encoder_2 is not None:
|
| 715 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 716 |
+
else:
|
| 717 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 718 |
+
|
| 719 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 720 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 721 |
+
|
| 722 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 723 |
+
bs_embed * num_images_per_prompt, -1
|
| 724 |
+
)
|
| 725 |
+
if do_classifier_free_guidance:
|
| 726 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 727 |
+
bs_embed * num_images_per_prompt, -1
|
| 728 |
+
)
|
| 729 |
+
|
| 730 |
+
if self.text_encoder is not None:
|
| 731 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 732 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 733 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 734 |
+
|
| 735 |
+
if self.text_encoder_2 is not None:
|
| 736 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 737 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 738 |
+
unscale_lora_layers(self.text_encoder_2, lora_scale)
|
| 739 |
+
|
| 740 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 741 |
+
|
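    # Shape sketch for `encode_prompt` (illustrative, assuming the standard SDXL text encoders and a
    # pipeline instance `pipe`): the per-encoder hidden states (77 x 768 from `text_encoder`,
    # 77 x 1280 from `text_encoder_2`) are concatenated along the last dim into `prompt_embeds` of
    # shape (batch, 77, 2048), while `pooled_prompt_embeds` of shape (batch, 1280) is the pooled
    # output of `text_encoder_2`.
    #
    #     prompt_embeds, neg_embeds, pooled, neg_pooled = pipe.encode_prompt(
    #         prompt="a photo of an astronaut", device=pipe._execution_device, num_images_per_prompt=1
    #     )
    #     # prompt_embeds.shape -> torch.Size([1, 77, 2048]); pooled.shape -> torch.Size([1, 1280])
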
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

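    # Illustrative note: the `output_hidden_states=True` branch is the one used by IP-Adapter variants
    # that consume penultimate CLIP hidden states (the "plus"-style image projections), while the
    # `image_embeds` branch serves adapters that expect the pooled CLIP image embedding. In both
    # cases the unconditional counterpart is simply the encoding of a zero image or a zero tensor.
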
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

    def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
        # get the original timestep using init_timestep
        if denoising_start is None:
            init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
            t_start = max(num_inference_steps - init_timestep, 0)
        else:
            t_start = 0

        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

        # Strength is irrelevant if we directly request a timestep to start at;
        # that is, strength is determined by the denoising_start instead.
        if denoising_start is not None:
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_start * self.scheduler.config.num_train_timesteps)
                )
            )

            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
            if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                # if the scheduler is a 2nd order scheduler we might have to do +1
                # because `num_inference_steps` might be even given that every timestep
                # (except the highest one) is duplicated. If `num_inference_steps` is even it would
                # mean that we cut the timesteps in the middle of the denoising step
                # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
                # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
                num_inference_steps = num_inference_steps + 1

            # because t_n+1 >= t_n, we slice the timesteps starting from the end
            timesteps = timesteps[-num_inference_steps:]
            return timesteps, num_inference_steps

        return timesteps, num_inference_steps - t_start

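    # Worked example (illustrative): with `num_inference_steps=50`, `strength=0.3` and a first-order
    # scheduler, `init_timestep = min(int(50 * 0.3), 50) = 15` and `t_start = 50 - 15 = 35`, so only
    # the last 15 scheduler timesteps are run and the input image is noised to that level first.
    # When `denoising_start` is set, `strength` is ignored and the cutoff is derived from
    # `num_train_timesteps` instead (e.g. `denoising_start=0.8` with 1000 train steps keeps only
    # timesteps below 200).
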
    def prepare_latents(
        self,
        image,
        mask,
        width,
        height,
        num_channels_latents,
        timestep,
        batch_size,
        num_images_per_prompt,
        dtype,
        device,
        generator=None,
        add_noise=True,
        latents=None,
        is_strength_max=True,
        return_noise=False,
        return_image_latents=False,
    ):
        batch_size *= num_images_per_prompt

        if image is None:
            shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            if latents is None:
                latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            else:
                latents = latents.to(device)

            # scale the initial noise by the standard deviation required by the scheduler
            latents = latents * self.scheduler.init_noise_sigma
            return latents

        elif mask is None:
            if not isinstance(image, (torch.Tensor, Image.Image, list)):
                raise ValueError(
                    f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
                )

            # Offload text encoder if `enable_model_cpu_offload` was enabled
            if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
                self.text_encoder_2.to("cpu")
                torch.cuda.empty_cache()

            image = image.to(device=device, dtype=dtype)

            if image.shape[1] == 4:
                init_latents = image

            else:
                # make sure the VAE is in float32 mode, as it overflows in float16
                if self.vae.config.force_upcast:
                    image = image.float()
                    self.vae.to(dtype=torch.float32)

                if isinstance(generator, list) and len(generator) != batch_size:
                    raise ValueError(
                        f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                        f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                    )

                elif isinstance(generator, list):
                    init_latents = [
                        retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                        for i in range(batch_size)
                    ]
                    init_latents = torch.cat(init_latents, dim=0)
                else:
                    init_latents = retrieve_latents(self.vae.encode(image), generator=generator)

                if self.vae.config.force_upcast:
                    self.vae.to(dtype)

                init_latents = init_latents.to(dtype)
                init_latents = self.vae.config.scaling_factor * init_latents

            if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
                # expand init_latents for batch_size
                additional_image_per_prompt = batch_size // init_latents.shape[0]
                init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
            elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
                raise ValueError(
                    f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
                )
            else:
                init_latents = torch.cat([init_latents], dim=0)

            if add_noise:
                shape = init_latents.shape
                noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                # get latents
                init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

            latents = init_latents
            return latents

        else:
            shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            if (image is None or timestep is None) and not is_strength_max:
                raise ValueError(
                    "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise. "
                    "However, either the image or the noise timestep has not been provided."
                )

            if image.shape[1] == 4:
                image_latents = image.to(device=device, dtype=dtype)
                image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
            elif return_image_latents or (latents is None and not is_strength_max):
                image = image.to(device=device, dtype=dtype)
                image_latents = self._encode_vae_image(image=image, generator=generator)
                image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)

            if latents is None and add_noise:
                noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                # if strength is 1. then initialise the latents to noise, else initialise to image + noise
                latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
                # if pure noise then scale the initial latents by the Scheduler's init sigma
                latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
            elif add_noise:
                noise = latents.to(device)
                latents = noise * self.scheduler.init_noise_sigma
            else:
                noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                latents = image_latents.to(device)

            outputs = (latents,)

            if return_noise:
                outputs += (noise,)

            if return_image_latents:
                outputs += (image_latents,)

            return outputs

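    # Illustrative summary of `prepare_latents`: with `image is None` it behaves like text-to-image
    # (pure noise scaled by `init_noise_sigma`); with an image but no mask it behaves like
    # image-to-image (VAE-encode the image, then `scheduler.add_noise` at the given `timestep`);
    # with both image and mask it follows the inpainting path and can additionally return the
    # sampled noise and the clean image latents for later blending with the mask.
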
    def prepare_mask_latents(
        self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
    ):
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(
            mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
        )
        mask = mask.to(device=device, dtype=dtype)

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)

        mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask

        if masked_image is not None and masked_image.shape[1] == 4:
            masked_image_latents = masked_image
        else:
            masked_image_latents = None

        if masked_image is not None:
            if masked_image_latents is None:
                masked_image = masked_image.to(device=device, dtype=dtype)
                masked_image_latents = self._encode_vae_image(masked_image, generator=generator)

            if masked_image_latents.shape[0] < batch_size:
                if not batch_size % masked_image_latents.shape[0] == 0:
                    raise ValueError(
                        "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                        f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                        " Make sure the number of images that you pass is divisible by the total requested batch size."
                    )
                masked_image_latents = masked_image_latents.repeat(
                    batch_size // masked_image_latents.shape[0], 1, 1, 1
                )

            masked_image_latents = (
                torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
            )

            # aligning device to prevent device errors when concating it with the latent model input
            masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)

        return mask, masked_image_latents

    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
        dtype = image.dtype
        if self.vae.config.force_upcast:
            image = image.float()
            self.vae.to(dtype=torch.float32)

        if isinstance(generator, list):
            image_latents = [
                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                for i in range(image.shape[0])
            ]
            image_latents = torch.cat(image_latents, dim=0)
        else:
            image_latents = retrieve_latents(self.vae.encode(image), generator=generator)

        if self.vae.config.force_upcast:
            self.vae.to(dtype)

        image_latents = image_latents.to(dtype)
        image_latents = self.vae.config.scaling_factor * image_latents

        return image_latents

    def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
        add_time_ids = list(original_size + crops_coords_top_left + target_size)

        passed_add_embed_dim = (
            self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
        )
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        return add_time_ids

    def upcast_vae(self):
        dtype = self.vae.dtype
        self.vae.to(dtype=torch.float32)
        use_torch_2_0_or_xformers = isinstance(
            self.vae.decoder.mid_block.attentions[0].processor,
            (
                AttnProcessor2_0,
                XFormersAttnProcessor,
                LoRAXFormersAttnProcessor,
                LoRAAttnProcessor2_0,
                FusedAttnProcessor2_0,
            ),
        )
        # if xformers or torch_2_0 is used attention block does not need
        # to be in float32 which can save lots of memory
        if use_torch_2_0_or_xformers:
            self.vae.post_quant_conv.to(dtype)
            self.vae.decoder.conv_in.to(dtype)
            self.vae.decoder.mid_block.to(dtype)

    def _enable_shared_attention_processors(
        self,
        share_attention: bool,
        adain_queries: bool,
        adain_keys: bool,
        adain_values: bool,
        full_attention_share: bool,
        shared_score_scale: float,
        shared_score_shift: float,
        only_self_level: float,
    ):
        r"""Helper method to enable usage of Shared Attention Processor."""
        attn_procs = {}
        num_self_layers = len([name for name in self.unet.attn_processors.keys() if "attn1" in name])

        only_self_vec = get_switch_vec(num_self_layers, only_self_level)

        for i, name in enumerate(self.unet.attn_processors.keys()):
            is_self_attention = "attn1" in name
            if is_self_attention:
                if only_self_vec[i // 2]:
                    attn_procs[name] = AttnProcessor2_0()
                else:
                    attn_procs[name] = SharedAttentionProcessor(
                        share_attention=share_attention,
                        adain_queries=adain_queries,
                        adain_keys=adain_keys,
                        adain_values=adain_values,
                        full_attention_share=full_attention_share,
                        shared_score_scale=shared_score_scale,
                        shared_score_shift=shared_score_shift,
                    )
            else:
                attn_procs[name] = AttnProcessor2_0()

        self.unet.set_attn_processor(attn_procs)

    def _disable_shared_attention_processors(self):
        r"""
        Helper method to disable usage of the Shared Attention Processor. All processors
        are reset to the default Attention Processor for pytorch versions above 2.0.
        """
        attn_procs = {}

        for i, name in enumerate(self.unet.attn_processors.keys()):
            attn_procs[name] = AttnProcessor2_0()

        self.unet.set_attn_processor(attn_procs)

    def _register_shared_norm(self, share_group_norm: bool = True, share_layer_norm: bool = True):
        r"""Helper method to register shared group/layer normalization layers."""

        def register_norm_forward(norm_layer: Union[nn.GroupNorm, nn.LayerNorm]) -> Union[nn.GroupNorm, nn.LayerNorm]:
            if not hasattr(norm_layer, "orig_forward"):
                setattr(norm_layer, "orig_forward", norm_layer.forward)
            orig_forward = norm_layer.orig_forward

            def forward_(hidden_states: torch.Tensor) -> torch.Tensor:
                n = hidden_states.shape[-2]
                hidden_states = concat_first(hidden_states, dim=-2)
                hidden_states = orig_forward(hidden_states)
                return hidden_states[..., :n, :]

            norm_layer.forward = forward_
            return norm_layer

        def get_norm_layers(pipeline_, norm_layers_: Dict[str, List[Union[nn.GroupNorm, nn.LayerNorm]]]):
            if isinstance(pipeline_, nn.LayerNorm) and share_layer_norm:
                norm_layers_["layer"].append(pipeline_)
            if isinstance(pipeline_, nn.GroupNorm) and share_group_norm:
                norm_layers_["group"].append(pipeline_)
            else:
                for layer in pipeline_.children():
                    get_norm_layers(layer, norm_layers_)

        norm_layers = {"group": [], "layer": []}
        get_norm_layers(self.unet, norm_layers)

        norm_layers_list = []
        for key in ["group", "layer"]:
            for layer in norm_layers[key]:
                norm_layers_list.append(register_norm_forward(layer))

        return norm_layers_list

    @property
    def style_aligned_enabled(self):
        r"""Returns whether StyleAligned has been enabled in the pipeline or not."""
        return hasattr(self, "_style_aligned_norm_layers") and self._style_aligned_norm_layers is not None

    def enable_style_aligned(
        self,
        share_group_norm: bool = True,
        share_layer_norm: bool = True,
        share_attention: bool = True,
        adain_queries: bool = True,
        adain_keys: bool = True,
        adain_values: bool = False,
        full_attention_share: bool = False,
        shared_score_scale: float = 1.0,
        shared_score_shift: float = 0.0,
        only_self_level: float = 0.0,
    ):
        r"""
        Enables the StyleAligned mechanism as in https://arxiv.org/abs/2312.02133.

        Args:
            share_group_norm (`bool`, defaults to `True`):
                Whether or not to use shared group normalization layers.
            share_layer_norm (`bool`, defaults to `True`):
                Whether or not to use shared layer normalization layers.
            share_attention (`bool`, defaults to `True`):
                Whether or not to use attention sharing between batch images.
            adain_queries (`bool`, defaults to `True`):
                Whether or not to apply the AdaIn operation on attention queries.
            adain_keys (`bool`, defaults to `True`):
                Whether or not to apply the AdaIn operation on attention keys.
            adain_values (`bool`, defaults to `False`):
                Whether or not to apply the AdaIn operation on attention values.
            full_attention_share (`bool`, defaults to `False`):
                Whether or not to use full attention sharing between all images in a batch. Can
                lead to content leakage within each batch and some loss in diversity.
            shared_score_scale (`float`, defaults to `1.0`):
                Scale for shared attention.
        """
        self._style_aligned_norm_layers = self._register_shared_norm(share_group_norm, share_layer_norm)
        self._enable_shared_attention_processors(
            share_attention=share_attention,
            adain_queries=adain_queries,
            adain_keys=adain_keys,
            adain_values=adain_values,
            full_attention_share=full_attention_share,
            shared_score_scale=shared_score_scale,
            shared_score_shift=shared_score_shift,
            only_self_level=only_self_level,
        )

    def disable_style_aligned(self):
        r"""Disables the StyleAligned mechanism if it had been previously enabled."""
        if self.style_aligned_enabled:
            for layer in self._style_aligned_norm_layers:
                layer.forward = layer.orig_forward

            self._style_aligned_norm_layers = None
            self._disable_shared_attention_processors()

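    # Usage sketch (illustrative; loading via `custom_pipeline` is an assumption about how this
    # community file is consumed, and `math` would need to be imported by the caller):
    #
    #     pipe = DiffusionPipeline.from_pretrained(
    #         "stabilityai/stable-diffusion-xl-base-1.0",
    #         custom_pipeline="pipeline_sdxl_style_aligned",
    #         torch_dtype=torch.float16,
    #     ).to("cuda")
    #     pipe.enable_style_aligned(shared_score_shift=math.log(2))
    #     images = pipe(prompt=["a cat", "a dog", "a rubber duck"], num_inference_steps=30).images
    #     pipe.disable_style_aligned()
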
    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            timesteps (`torch.Tensor`):
                generate embedding vectors at these timesteps
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def denoising_end(self):
        return self._denoising_end

    @property
    def denoising_start(self):
        return self._denoising_start

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

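    # Illustrative reminder of how these properties are consumed downstream: classifier-free guidance
    # combines the conditional and unconditional noise predictions as
    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    # which is why `do_classifier_free_guidance` requires `guidance_scale > 1` (and is disabled when
    # the UNet is guidance-distilled, i.e. `time_cond_proj_dim` is not None).
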
| 1372 |
+
@torch.no_grad()
|
| 1373 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 1374 |
+
def __call__(
|
| 1375 |
+
self,
|
| 1376 |
+
prompt: Union[str, List[str]] = None,
|
| 1377 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 1378 |
+
image: Optional[PipelineImageInput] = None,
|
| 1379 |
+
mask_image: Optional[PipelineImageInput] = None,
|
| 1380 |
+
masked_image_latents: Optional[torch.FloatTensor] = None,
|
| 1381 |
+
strength: float = 0.3,
|
| 1382 |
+
height: Optional[int] = None,
|
| 1383 |
+
width: Optional[int] = None,
|
| 1384 |
+
num_inference_steps: int = 50,
|
| 1385 |
+
timesteps: List[int] = None,
|
| 1386 |
+
denoising_start: Optional[float] = None,
|
| 1387 |
+
denoising_end: Optional[float] = None,
|
| 1388 |
+
guidance_scale: float = 5.0,
|
| 1389 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1390 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 1391 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1392 |
+
eta: float = 0.0,
|
| 1393 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1394 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 1395 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1396 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1397 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1398 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1399 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 1400 |
+
output_type: Optional[str] = "pil",
|
| 1401 |
+
return_dict: bool = True,
|
| 1402 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1403 |
+
guidance_rescale: float = 0.0,
|
| 1404 |
+
original_size: Optional[Tuple[int, int]] = None,
|
| 1405 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 1406 |
+
target_size: Optional[Tuple[int, int]] = None,
|
| 1407 |
+
clip_skip: Optional[int] = None,
|
| 1408 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 1409 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 1410 |
+
**kwargs,
|
| 1411 |
+
):
|
| 1412 |
+
r"""
|
| 1413 |
+
Function invoked when calling the pipeline for generation.
|
| 1414 |
+
|
| 1415 |
+
Args:
|
| 1416 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 1417 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
| 1418 |
+
instead.
|
| 1419 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 1420 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 1421 |
+
used in both text-encoders
|
| 1422 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 1423 |
+
The height in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 1424 |
+
Anything below 512 pixels won't work well for
|
| 1425 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 1426 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 1427 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 1428 |
+
The width in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 1429 |
+
Anything below 512 pixels won't work well for
|
| 1430 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 1431 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 1432 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1433 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1434 |
+
expense of slower inference.
|
| 1435 |
+
timesteps (`List[int]`, *optional*):
|
| 1436 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 1437 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 1438 |
+
passed will be used. Must be in descending order.
|
| 1439 |
+
denoising_end (`float`, *optional*):
|
| 1440 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 1441 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 1442 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
| 1443 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
| 1444 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 1445 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
|
| 1446 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 1447 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 1448 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1449 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 1450 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 1451 |
+
usually at the expense of lower image quality.
|
| 1452 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1453 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 1454 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 1455 |
+
less than `1`).
|
| 1456 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 1457 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 1458 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 1459 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1460 |
+
The number of images to generate per prompt.
|
| 1461 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1462 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 1463 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1464 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1465 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1466 |
+
to make generation deterministic.
|
| 1467 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 1468 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 1469 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 1470 |
+
tensor will ge generated by sampling using the supplied random `generator`.
|
| 1471 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1472 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1473 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1474 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1475 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1476 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1477 |
+
argument.
|
| 1478 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1479 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 1480 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 1481 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1482 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1483 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 1484 |
+
input argument.
|
| 1485 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*):
|
| 1486 |
+
Optional image input to work with IP Adapters.
|
| 1487 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1488 |
+
The output format of the generate image. Choose between
|
| 1489 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1490 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1491 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
| 1492 |
+
of a plain tuple.
|
| 1493 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1494 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1495 |
+
`self.processor` in
|
| 1496 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1497 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 1498 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 1499 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
|
| 1500 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
| 1501 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 1502 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1503 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 1504 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 1505 |
+
explained in section 2.2 of
|
| 1506 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1507 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 1508 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 1509 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 1510 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1511 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1512 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1513 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 1514 |
+
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 1515 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1516 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1517 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 1518 |
+
micro-conditioning as explained in section 2.2 of
|
| 1519 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1520 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1521 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 1522 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 1523 |
+
micro-conditioning as explained in section 2.2 of
|
| 1524 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1525 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1526 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1527 |
+
To negatively condition the generation process based on a target image resolution. It should be as same
|
| 1528 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1529 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1530 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1531 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 1532 |
+
A function that calls at the end of each denoising steps during the inference. The function is called
|
| 1533 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 1534 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 1535 |
+
`callback_on_step_end_tensor_inputs`.
|
| 1536 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 1537 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 1538 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 1539 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 1540 |
+
|
| 1541 |
+
Examples:
|
| 1542 |
+
|
| 1543 |
+
Returns:
|
| 1544 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
|
| 1545 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
|
| 1546 |
+
`tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 1547 |
+
"""
|
| 1548 |
+
|
| 1549 |
+
callback = kwargs.pop("callback", None)
|
| 1550 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 1551 |
+
|
| 1552 |
+
if callback is not None:
|
| 1553 |
+
deprecate(
|
| 1554 |
+
"callback",
|
| 1555 |
+
"1.0.0",
|
| 1556 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 1557 |
+
)
|
| 1558 |
+
if callback_steps is not None:
|
| 1559 |
+
deprecate(
|
| 1560 |
+
"callback_steps",
|
| 1561 |
+
"1.0.0",
|
| 1562 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 1563 |
+
)
|
| 1564 |
+
|
| 1565 |
+
# 0. Default height and width to unet
|
| 1566 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 1567 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 1568 |
+
|
| 1569 |
+
original_size = original_size or (height, width)
|
| 1570 |
+
target_size = target_size or (height, width)
|
| 1571 |
+
|
| 1572 |
+
# 1. Check inputs. Raise error if not correct
|
| 1573 |
+
self.check_inputs(
|
| 1574 |
+
prompt=prompt,
|
| 1575 |
+
prompt_2=prompt_2,
|
| 1576 |
+
height=height,
|
| 1577 |
+
width=width,
|
| 1578 |
+
callback_steps=callback_steps,
|
| 1579 |
+
negative_prompt=negative_prompt,
|
| 1580 |
+
negative_prompt_2=negative_prompt_2,
|
| 1581 |
+
prompt_embeds=prompt_embeds,
|
| 1582 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1583 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1584 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1585 |
+
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
|
| 1586 |
+
)
|
| 1587 |
+
|
| 1588 |
+
self._guidance_scale = guidance_scale
|
| 1589 |
+
self._guidance_rescale = guidance_rescale
|
| 1590 |
+
self._clip_skip = clip_skip
|
| 1591 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 1592 |
+
self._denoising_end = denoising_end
|
| 1593 |
+
self._denoising_start = denoising_start
|
| 1594 |
+
self._interrupt = False
|
| 1595 |
+
|
| 1596 |
+
# 2. Define call parameters
|
| 1597 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1598 |
+
batch_size = 1
|
| 1599 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1600 |
+
batch_size = len(prompt)
|
| 1601 |
+
else:
|
| 1602 |
+
batch_size = prompt_embeds.shape[0]
|
| 1603 |
+
|
| 1604 |
+
device = self._execution_device
|
| 1605 |
+
|
| 1606 |
+
# 3. Encode input prompt
|
| 1607 |
+
lora_scale = (
|
| 1608 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 1609 |
+
)
|
| 1610 |
+
|
| 1611 |
+
(
|
| 1612 |
+
prompt_embeds,
|
| 1613 |
+
negative_prompt_embeds,
|
| 1614 |
+
pooled_prompt_embeds,
|
| 1615 |
+
negative_pooled_prompt_embeds,
|
| 1616 |
+
) = self.encode_prompt(
|
| 1617 |
+
prompt=prompt,
|
| 1618 |
+
prompt_2=prompt_2,
|
| 1619 |
+
device=device,
|
| 1620 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1621 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1622 |
+
negative_prompt=negative_prompt,
|
| 1623 |
+
negative_prompt_2=negative_prompt_2,
|
| 1624 |
+
prompt_embeds=prompt_embeds,
|
| 1625 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1626 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1627 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1628 |
+
lora_scale=lora_scale,
|
| 1629 |
+
clip_skip=self.clip_skip,
|
| 1630 |
+
)
|
| 1631 |
+
|
| 1632 |
+
# 4. Preprocess image and mask_image
|
| 1633 |
+
if image is not None:
|
| 1634 |
+
image = self.image_processor.preprocess(image, height=height, width=width)
|
| 1635 |
+
image = image.to(device=self.device, dtype=prompt_embeds.dtype)
|
| 1636 |
+
|
| 1637 |
+
if mask_image is not None:
|
| 1638 |
+
mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
|
| 1639 |
+
mask = mask.to(device=self.device, dtype=prompt_embeds.dtype)
|
| 1640 |
+
|
| 1641 |
+
if masked_image_latents is not None:
|
| 1642 |
+
masked_image = masked_image_latents
|
| 1643 |
+
elif image.shape[1] == 4:
|
| 1644 |
+
# if image is in latent space, we can't mask it
|
| 1645 |
+
masked_image = None
|
| 1646 |
+
else:
|
| 1647 |
+
masked_image = image * (mask < 0.5)
|
| 1648 |
+
else:
|
| 1649 |
+
mask = None
|
| 1650 |
+
|
| 1651 |
+
# 4. Prepare timesteps
|
| 1652 |
+
def denoising_value_valid(dnv):
|
| 1653 |
+
return isinstance(dnv, float) and 0 < dnv < 1
|
| 1654 |
+
|
| 1655 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 1656 |
+
|
| 1657 |
+
if image is not None:
|
| 1658 |
+
timesteps, num_inference_steps = self.get_timesteps(
|
| 1659 |
+
num_inference_steps,
|
| 1660 |
+
strength,
|
| 1661 |
+
device,
|
| 1662 |
+
denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
|
| 1663 |
+
)
|
| 1664 |
+
|
| 1665 |
+
# check that number of inference steps is not < 1 - as this doesn't make sense
|
| 1666 |
+
if num_inference_steps < 1:
|
| 1667 |
+
raise ValueError(
|
| 1668 |
+
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
|
| 1669 |
+
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
|
| 1670 |
+
)
|
| 1671 |
+
|
| 1672 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 1673 |
+
is_strength_max = strength == 1.0
|
| 1674 |
+
add_noise = True if self.denoising_start is None else False
|
| 1675 |
+
|
| 1676 |
+
# 5. Prepare latent variables
|
| 1677 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1678 |
+
num_channels_unet = self.unet.config.in_channels
|
| 1679 |
+
return_image_latents = num_channels_unet == 4
|
| 1680 |
+
|
| 1681 |
+
latents = self.prepare_latents(
|
| 1682 |
+
image=image,
|
| 1683 |
+
mask=mask,
|
| 1684 |
+
width=width,
|
| 1685 |
+
height=height,
|
| 1686 |
+
num_channels_latents=num_channels_latents,
|
| 1687 |
+
timestep=latent_timestep,
|
| 1688 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1689 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1690 |
+
dtype=prompt_embeds.dtype,
|
| 1691 |
+
device=device,
|
| 1692 |
+
generator=generator,
|
| 1693 |
+
add_noise=add_noise,
|
| 1694 |
+
latents=latents,
|
| 1695 |
+
is_strength_max=is_strength_max,
|
| 1696 |
+
return_noise=True,
|
| 1697 |
+
return_image_latents=return_image_latents,
|
| 1698 |
+
)
|
| 1699 |
+
|
| 1700 |
+
if mask is not None:
|
| 1701 |
+
if return_image_latents:
|
| 1702 |
+
latents, noise, image_latents = latents
|
| 1703 |
+
else:
|
| 1704 |
+
latents, noise = latents
|
| 1705 |
+
|
| 1706 |
+
mask, masked_image_latents = self.prepare_mask_latents(
|
| 1707 |
+
mask=mask,
|
| 1708 |
+
masked_image=masked_image,
|
| 1709 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1710 |
+
height=height,
|
| 1711 |
+
width=width,
|
| 1712 |
+
dtype=prompt_embeds.dtype,
|
| 1713 |
+
device=device,
|
| 1714 |
+
generator=generator,
|
| 1715 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1716 |
+
)
|
| 1717 |
+
|
| 1718 |
+
# Check that sizes of mask, masked image and latents match
|
| 1719 |
+
if num_channels_unet == 9:
|
| 1720 |
+
# default case for runwayml/stable-diffusion-inpainting
|
| 1721 |
+
num_channels_mask = mask.shape[1]
|
| 1722 |
+
num_channels_masked_image = masked_image_latents.shape[1]
|
| 1723 |
+
if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet:
|
| 1724 |
+
raise ValueError(
|
| 1725 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 1726 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 1727 |
+
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
| 1728 |
+
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
|
| 1729 |
+
" `pipeline.unet` or your `mask_image` or `image` input."
|
| 1730 |
+
)
|
| 1731 |
+
elif num_channels_unet != 4:
|
| 1732 |
+
raise ValueError(
|
| 1733 |
+
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
|
| 1734 |
+
)
|
| 1735 |
+
|
| 1736 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1737 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1738 |
+
|
| 1739 |
+
height, width = latents.shape[-2:]
|
| 1740 |
+
height = height * self.vae_scale_factor
|
| 1741 |
+
width = width * self.vae_scale_factor
|
| 1742 |
+
|
| 1743 |
+
original_size = original_size or (height, width)
|
| 1744 |
+
target_size = target_size or (height, width)
|
| 1745 |
+
|
| 1746 |
+
# 7. Prepare added time ids & embeddings
|
| 1747 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1748 |
+
add_time_ids = self._get_add_time_ids(
|
| 1749 |
+
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
|
| 1750 |
+
)
|
| 1751 |
+
|
| 1752 |
+
if self.do_classifier_free_guidance:
|
| 1753 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1754 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1755 |
+
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
|
| 1756 |
+
|
| 1757 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1758 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1759 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1760 |
+
|
| 1761 |
+
if ip_adapter_image is not None:
|
| 1762 |
+
output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
|
| 1763 |
+
image_embeds, negative_image_embeds = self.encode_image(
|
| 1764 |
+
ip_adapter_image, device, num_images_per_prompt, output_hidden_state
|
| 1765 |
+
)
|
| 1766 |
+
if self.do_classifier_free_guidance:
|
| 1767 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 1768 |
+
image_embeds = image_embeds.to(device)
|
| 1769 |
+
|
| 1770 |
+
# 8. Denoising loop
|
| 1771 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1772 |
+
|
| 1773 |
+
# 8.1 Apply denoising_end
|
| 1774 |
+
if (
|
| 1775 |
+
self.denoising_end is not None
|
| 1776 |
+
and isinstance(self.denoising_end, float)
|
| 1777 |
+
and self.denoising_end > 0
|
| 1778 |
+
and self.denoising_end < 1
|
| 1779 |
+
):
|
| 1780 |
+
discrete_timestep_cutoff = int(
|
| 1781 |
+
round(
|
| 1782 |
+
self.scheduler.config.num_train_timesteps
|
| 1783 |
+
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
|
| 1784 |
+
)
|
| 1785 |
+
)
|
| 1786 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 1787 |
+
timesteps = timesteps[:num_inference_steps]
|
| 1788 |
+
|
| 1789 |
+
# 9. Optionally get Guidance Scale Embedding
|
| 1790 |
+
timestep_cond = None
|
| 1791 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 1792 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 1793 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 1794 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 1795 |
+
).to(device=device, dtype=latents.dtype)
|
| 1796 |
+
|
| 1797 |
+
self._num_timesteps = len(timesteps)
|
| 1798 |
+
|
| 1799 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1800 |
+
for i, t in enumerate(timesteps):
|
| 1801 |
+
if self.interrupt:
|
| 1802 |
+
continue
|
| 1803 |
+
|
| 1804 |
+
# expand the latents if we are doing classifier free guidance
|
| 1805 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1806 |
+
|
| 1807 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1808 |
+
|
| 1809 |
+
# predict the noise residual
|
| 1810 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 1811 |
+
if ip_adapter_image is not None:
|
| 1812 |
+
added_cond_kwargs["image_embeds"] = image_embeds
|
| 1813 |
+
|
| 1814 |
+
noise_pred = self.unet(
|
| 1815 |
+
latent_model_input,
|
| 1816 |
+
t,
|
| 1817 |
+
encoder_hidden_states=prompt_embeds,
|
| 1818 |
+
timestep_cond=timestep_cond,
|
| 1819 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1820 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1821 |
+
return_dict=False,
|
| 1822 |
+
)[0]
|
| 1823 |
+
|
| 1824 |
+
# perform guidance
|
| 1825 |
+
if self.do_classifier_free_guidance:
|
| 1826 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1827 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1828 |
+
|
| 1829 |
+
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
|
| 1830 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1831 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
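# `rescale_noise_cfg` rescales the guided prediction so its per-sample standard deviation
# matches that of `noise_pred_text`, then blends the rescaled and original CFG predictions
# with weight `guidance_rescale`, counteracting the over-exposure that plain classifier-free
# guidance can introduce.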
|
| 1832 |
+
|
| 1833 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1834 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1835 |
+
|
| 1836 |
+
if mask is not None and num_channels_unet == 4:
|
| 1837 |
+
init_latents_proper = image_latents
|
| 1838 |
+
|
| 1839 |
+
if self.do_classifier_free_guidance:
|
| 1840 |
+
init_mask, _ = mask.chunk(2)
|
| 1841 |
+
else:
|
| 1842 |
+
init_mask = mask
|
| 1843 |
+
|
| 1844 |
+
if i < len(timesteps) - 1:
|
| 1845 |
+
noise_timestep = timesteps[i + 1]
|
| 1846 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 1847 |
+
init_latents_proper, noise, torch.tensor([noise_timestep])
|
| 1848 |
+
)
|
| 1849 |
+
|
| 1850 |
+
latents = (1 - init_mask) * init_latents_proper + init_mask * latents
|
| 1851 |
+
|
| 1852 |
+
if callback_on_step_end is not None:
|
| 1853 |
+
callback_kwargs = {}
|
| 1854 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1855 |
+
callback_kwargs[k] = locals()[k]
|
| 1856 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1857 |
+
|
| 1858 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1859 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1860 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1861 |
+
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
|
| 1862 |
+
negative_pooled_prompt_embeds = callback_outputs.pop(
|
| 1863 |
+
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
|
| 1864 |
+
)
|
| 1865 |
+
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
|
| 1866 |
+
|
| 1867 |
+
# call the callback, if provided
|
| 1868 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1869 |
+
progress_bar.update()
|
| 1870 |
+
if callback is not None and i % callback_steps == 0:
|
| 1871 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1872 |
+
callback(step_idx, t, latents)
|
| 1873 |
+
|
| 1874 |
+
if XLA_AVAILABLE:
|
| 1875 |
+
xm.mark_step()
|
| 1876 |
+
|
| 1877 |
+
if not output_type == "latent":
|
| 1878 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1879 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1880 |
+
|
| 1881 |
+
if needs_upcasting:
|
| 1882 |
+
self.upcast_vae()
|
| 1883 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1884 |
+
|
| 1885 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1886 |
+
|
| 1887 |
+
# cast back to fp16 if needed
|
| 1888 |
+
if needs_upcasting:
|
| 1889 |
+
self.vae.to(dtype=torch.float16)
|
| 1890 |
+
else:
|
| 1891 |
+
image = latents
|
| 1892 |
+
|
| 1893 |
+
if not output_type == "latent":
|
| 1894 |
+
# apply watermark if available
|
| 1895 |
+
if self.watermark is not None:
|
| 1896 |
+
image = self.watermark.apply_watermark(image)
|
| 1897 |
+
|
| 1898 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1899 |
+
|
| 1900 |
+
# Offload all models
|
| 1901 |
+
self.maybe_free_model_hooks()
|
| 1902 |
+
|
| 1903 |
+
if not return_dict:
|
| 1904 |
+
return (image,)
|
| 1905 |
+
|
| 1906 |
+
return StableDiffusionXLPipelineOutput(images=image)
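A minimal usage sketch for the SDXL community pipeline defined above, assuming the file is saved locally and an SDXL base checkpoint is used; the checkpoint id, file path, and prompt below are illustrative placeholders, not taken from this diff. Community pipelines are normally loaded through `DiffusionPipeline.from_pretrained` with the `custom_pipeline` argument.

```python
import torch
from diffusers import DiffusionPipeline

# Assumptions: the checkpoint id and the local file path are illustrative placeholders.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",   # assumed SDXL base checkpoint
    custom_pipeline="./community_pipeline.py",    # hypothetical path to the file shown above
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")


def on_step_end(pipeline, step, timestep, callback_kwargs):
    # Receives the tensors listed in `callback_on_step_end_tensor_inputs`; they may be
    # modified and returned to steer the remaining denoising steps.
    return callback_kwargs


# Plain text-to-image call; the pipeline above also accepts `image`/`mask_image` for inpainting-style use.
image = pipe(
    prompt="a photo of an astronaut riding a horse on the moon",
    num_inference_steps=30,
    guidance_scale=7.5,
    callback_on_step_end=on_step_end,
).images[0]
image.save("astronaut.png")
```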
|
v0.27.0/pipeline_stable_diffusion_upscale_ldm3d.py
ADDED
|
@@ -0,0 +1,772 @@
| 1 |
+
# Copyright 2024 The Intel Labs Team Authors and the HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import PIL
|
| 20 |
+
import torch
|
| 21 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 22 |
+
|
| 23 |
+
from diffusers import DiffusionPipeline
|
| 24 |
+
from diffusers.image_processor import PipelineDepthInput, PipelineImageInput, VaeImageProcessorLDM3D
|
| 25 |
+
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 26 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 27 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 28 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
| 29 |
+
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d import LDM3DPipelineOutput
|
| 30 |
+
from diffusers.schedulers import DDPMScheduler, KarrasDiffusionSchedulers
|
| 31 |
+
from diffusers.utils import (
|
| 32 |
+
USE_PEFT_BACKEND,
|
| 33 |
+
deprecate,
|
| 34 |
+
logging,
|
| 35 |
+
scale_lora_layers,
|
| 36 |
+
unscale_lora_layers,
|
| 37 |
+
)
|
| 38 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 42 |
+
|
| 43 |
+
EXAMPLE_DOC_STRING = """
|
| 44 |
+
Examples:
|
| 45 |
+
```python
|
| 46 |
+
>>> from diffusers import StableDiffusionUpscaleLDM3DPipeline
|
| 47 |
+
>>> from PIL import Image
|
| 48 |
+
>>> from io import BytesIO
|
| 49 |
+
>>> import requests
|
| 50 |
+
|
| 51 |
+
>>> pipe = StableDiffusionUpscaleLDM3DPipeline.from_pretrained("Intel/ldm3d-sr")
|
| 52 |
+
>>> pipe = pipe.to("cuda")
|
| 53 |
+
>>> rgb_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_rgb.jpg"
|
| 54 |
+
>>> depth_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_depth.png"
|
| 55 |
+
>>> low_res_rgb = Image.open(BytesIO(requests.get(rgb_path).content)).convert("RGB")
|
| 56 |
+
>>> low_res_depth = Image.open(BytesIO(requests.get(depth_path).content)).convert("L")
|
| 57 |
+
>>> output = pipe(
|
| 58 |
+
... prompt="high quality high resolution uhd 4k image",
|
| 59 |
+
... rgb=low_res_rgb,
|
| 60 |
+
... depth=low_res_depth,
|
| 61 |
+
... num_inference_steps=50,
|
| 62 |
+
... target_res=[1024, 1024],
|
| 63 |
+
... )
|
| 64 |
+
>>> rgb_image, depth_image = output.rgb, output.depth
|
| 65 |
+
>>> rgb_image[0].save("hr_ldm3d_rgb.jpg")
|
| 66 |
+
>>> depth_image[0].save("hr_ldm3d_depth.png")
|
| 67 |
+
```
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class StableDiffusionUpscaleLDM3DPipeline(
|
| 72 |
+
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
|
| 73 |
+
):
|
| 74 |
+
r"""
|
| 75 |
+
Pipeline for upscaling RGB and depth images generated with LDM3D (text-to-image and 3D generation).
|
| 76 |
+
|
| 77 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 78 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 79 |
+
|
| 80 |
+
The pipeline also inherits the following loading methods:
|
| 81 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 82 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 83 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 84 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 85 |
+
|
| 86 |
+
Args:
|
| 87 |
+
vae ([`AutoencoderKL`]):
|
| 88 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 89 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 90 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 91 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 92 |
+
A `CLIPTokenizer` to tokenize text.
|
| 93 |
+
unet ([`UNet2DConditionModel`]):
|
| 94 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 95 |
+
low_res_scheduler ([`SchedulerMixin`]):
|
| 96 |
+
A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of
|
| 97 |
+
[`DDPMScheduler`].
|
| 98 |
+
scheduler ([`SchedulerMixin`]):
|
| 99 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 100 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 101 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 102 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 103 |
+
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
| 104 |
+
about a model's potential harms.
|
| 105 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 106 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 107 |
+
"""
|
| 108 |
+
|
| 109 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 110 |
+
|
| 111 |
+
def __init__(
|
| 112 |
+
self,
|
| 113 |
+
vae: AutoencoderKL,
|
| 114 |
+
text_encoder: CLIPTextModel,
|
| 115 |
+
tokenizer: CLIPTokenizer,
|
| 116 |
+
unet: UNet2DConditionModel,
|
| 117 |
+
low_res_scheduler: DDPMScheduler,
|
| 118 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 119 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 120 |
+
feature_extractor: CLIPImageProcessor,
|
| 121 |
+
requires_safety_checker: bool = True,
|
| 122 |
+
watermarker: Optional[Any] = None,
|
| 123 |
+
max_noise_level: int = 350,
|
| 124 |
+
):
|
| 125 |
+
super().__init__()
|
| 126 |
+
|
| 127 |
+
if safety_checker is None and requires_safety_checker:
|
| 128 |
+
logger.warning(
|
| 129 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 130 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 131 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 132 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 133 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 134 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 135 |
+
)
|
| 136 |
+
|
| 137 |
+
if safety_checker is not None and feature_extractor is None:
|
| 138 |
+
raise ValueError(
|
| 139 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 140 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
self.register_modules(
|
| 144 |
+
vae=vae,
|
| 145 |
+
text_encoder=text_encoder,
|
| 146 |
+
tokenizer=tokenizer,
|
| 147 |
+
unet=unet,
|
| 148 |
+
low_res_scheduler=low_res_scheduler,
|
| 149 |
+
scheduler=scheduler,
|
| 150 |
+
safety_checker=safety_checker,
|
| 151 |
+
watermarker=watermarker,
|
| 152 |
+
feature_extractor=feature_extractor,
|
| 153 |
+
)
|
| 154 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 155 |
+
self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor, resample="bilinear")
|
| 156 |
+
# self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 157 |
+
self.register_to_config(max_noise_level=max_noise_level)
|
| 158 |
+
|
| 159 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline._encode_prompt
|
| 160 |
+
def _encode_prompt(
|
| 161 |
+
self,
|
| 162 |
+
prompt,
|
| 163 |
+
device,
|
| 164 |
+
num_images_per_prompt,
|
| 165 |
+
do_classifier_free_guidance,
|
| 166 |
+
negative_prompt=None,
|
| 167 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 168 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 169 |
+
lora_scale: Optional[float] = None,
|
| 170 |
+
**kwargs,
|
| 171 |
+
):
|
| 172 |
+
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
|
| 173 |
+
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
|
| 174 |
+
|
| 175 |
+
prompt_embeds_tuple = self.encode_prompt(
|
| 176 |
+
prompt=prompt,
|
| 177 |
+
device=device,
|
| 178 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 179 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 180 |
+
negative_prompt=negative_prompt,
|
| 181 |
+
prompt_embeds=prompt_embeds,
|
| 182 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 183 |
+
lora_scale=lora_scale,
|
| 184 |
+
**kwargs,
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
# concatenate for backwards comp
|
| 188 |
+
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
|
| 189 |
+
|
| 190 |
+
return prompt_embeds
|
| 191 |
+
|
| 192 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline.encode_prompt
|
| 193 |
+
def encode_prompt(
|
| 194 |
+
self,
|
| 195 |
+
prompt,
|
| 196 |
+
device,
|
| 197 |
+
num_images_per_prompt,
|
| 198 |
+
do_classifier_free_guidance,
|
| 199 |
+
negative_prompt=None,
|
| 200 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 201 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 202 |
+
lora_scale: Optional[float] = None,
|
| 203 |
+
clip_skip: Optional[int] = None,
|
| 204 |
+
):
|
| 205 |
+
r"""
|
| 206 |
+
Encodes the prompt into text encoder hidden states.
|
| 207 |
+
|
| 208 |
+
Args:
|
| 209 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 210 |
+
prompt to be encoded
|
| 211 |
+
device: (`torch.device`):
|
| 212 |
+
torch device
|
| 213 |
+
num_images_per_prompt (`int`):
|
| 214 |
+
number of images that should be generated per prompt
|
| 215 |
+
do_classifier_free_guidance (`bool`):
|
| 216 |
+
whether to use classifier free guidance or not
|
| 217 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 218 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 219 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 220 |
+
less than `1`).
|
| 221 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 222 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 223 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 224 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 225 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 226 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 227 |
+
argument.
|
| 228 |
+
lora_scale (`float`, *optional*):
|
| 229 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 230 |
+
clip_skip (`int`, *optional*):
|
| 231 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 232 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 233 |
+
"""
|
| 234 |
+
# set lora scale so that monkey patched LoRA
|
| 235 |
+
# function of text encoder can correctly access it
|
| 236 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 237 |
+
self._lora_scale = lora_scale
|
| 238 |
+
|
| 239 |
+
# dynamically adjust the LoRA scale
|
| 240 |
+
if not USE_PEFT_BACKEND:
|
| 241 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 242 |
+
else:
|
| 243 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 244 |
+
|
| 245 |
+
if prompt is not None and isinstance(prompt, str):
|
| 246 |
+
batch_size = 1
|
| 247 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 248 |
+
batch_size = len(prompt)
|
| 249 |
+
else:
|
| 250 |
+
batch_size = prompt_embeds.shape[0]
|
| 251 |
+
|
| 252 |
+
if prompt_embeds is None:
|
| 253 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 254 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 255 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 256 |
+
|
| 257 |
+
text_inputs = self.tokenizer(
|
| 258 |
+
prompt,
|
| 259 |
+
padding="max_length",
|
| 260 |
+
max_length=self.tokenizer.model_max_length,
|
| 261 |
+
truncation=True,
|
| 262 |
+
return_tensors="pt",
|
| 263 |
+
)
|
| 264 |
+
text_input_ids = text_inputs.input_ids
|
| 265 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 266 |
+
|
| 267 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 268 |
+
text_input_ids, untruncated_ids
|
| 269 |
+
):
|
| 270 |
+
removed_text = self.tokenizer.batch_decode(
|
| 271 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 272 |
+
)
|
| 273 |
+
logger.warning(
|
| 274 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 275 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 279 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 280 |
+
else:
|
| 281 |
+
attention_mask = None
|
| 282 |
+
|
| 283 |
+
if clip_skip is None:
|
| 284 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 285 |
+
prompt_embeds = prompt_embeds[0]
|
| 286 |
+
else:
|
| 287 |
+
prompt_embeds = self.text_encoder(
|
| 288 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 289 |
+
)
|
| 290 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 291 |
+
# all the hidden states from the encoder layers. Then index into
|
| 292 |
+
# the tuple to access the hidden states from the desired layer.
|
| 293 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 294 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 295 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 296 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 297 |
+
# layer.
|
| 298 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 299 |
+
|
| 300 |
+
if self.text_encoder is not None:
|
| 301 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 302 |
+
elif self.unet is not None:
|
| 303 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 304 |
+
else:
|
| 305 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 306 |
+
|
| 307 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 308 |
+
|
| 309 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 310 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 311 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 312 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 313 |
+
|
| 314 |
+
# get unconditional embeddings for classifier free guidance
|
| 315 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 316 |
+
uncond_tokens: List[str]
|
| 317 |
+
if negative_prompt is None:
|
| 318 |
+
uncond_tokens = [""] * batch_size
|
| 319 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 320 |
+
raise TypeError(
|
| 321 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 322 |
+
f" {type(prompt)}."
|
| 323 |
+
)
|
| 324 |
+
elif isinstance(negative_prompt, str):
|
| 325 |
+
uncond_tokens = [negative_prompt]
|
| 326 |
+
elif batch_size != len(negative_prompt):
|
| 327 |
+
raise ValueError(
|
| 328 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 329 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 330 |
+
" the batch size of `prompt`."
|
| 331 |
+
)
|
| 332 |
+
else:
|
| 333 |
+
uncond_tokens = negative_prompt
|
| 334 |
+
|
| 335 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 336 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 337 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 338 |
+
|
| 339 |
+
max_length = prompt_embeds.shape[1]
|
| 340 |
+
uncond_input = self.tokenizer(
|
| 341 |
+
uncond_tokens,
|
| 342 |
+
padding="max_length",
|
| 343 |
+
max_length=max_length,
|
| 344 |
+
truncation=True,
|
| 345 |
+
return_tensors="pt",
|
| 346 |
+
)
|
| 347 |
+
|
| 348 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 349 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 350 |
+
else:
|
| 351 |
+
attention_mask = None
|
| 352 |
+
|
| 353 |
+
negative_prompt_embeds = self.text_encoder(
|
| 354 |
+
uncond_input.input_ids.to(device),
|
| 355 |
+
attention_mask=attention_mask,
|
| 356 |
+
)
|
| 357 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 358 |
+
|
| 359 |
+
if do_classifier_free_guidance:
|
| 360 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 361 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 362 |
+
|
| 363 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 364 |
+
|
| 365 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 366 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 367 |
+
|
| 368 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 369 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 370 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 371 |
+
|
| 372 |
+
return prompt_embeds, negative_prompt_embeds
|
| 373 |
+
|
| 374 |
+
def run_safety_checker(self, image, device, dtype):
|
| 375 |
+
if self.safety_checker is None:
|
| 376 |
+
has_nsfw_concept = None
|
| 377 |
+
else:
|
| 378 |
+
if torch.is_tensor(image):
|
| 379 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 380 |
+
else:
|
| 381 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 382 |
+
rgb_feature_extractor_input = feature_extractor_input[0]
|
| 383 |
+
safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device)
|
| 384 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 385 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 386 |
+
)
|
| 387 |
+
return image, has_nsfw_concept
|
| 388 |
+
|
| 389 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 390 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 391 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 392 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 393 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 394 |
+
# and should be between [0, 1]
|
| 395 |
+
|
| 396 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 397 |
+
extra_step_kwargs = {}
|
| 398 |
+
if accepts_eta:
|
| 399 |
+
extra_step_kwargs["eta"] = eta
|
| 400 |
+
|
| 401 |
+
# check if the scheduler accepts generator
|
| 402 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 403 |
+
if accepts_generator:
|
| 404 |
+
extra_step_kwargs["generator"] = generator
|
| 405 |
+
return extra_step_kwargs
|
| 406 |
+
|
| 407 |
+
def check_inputs(
|
| 408 |
+
self,
|
| 409 |
+
prompt,
|
| 410 |
+
image,
|
| 411 |
+
noise_level,
|
| 412 |
+
callback_steps,
|
| 413 |
+
negative_prompt=None,
|
| 414 |
+
prompt_embeds=None,
|
| 415 |
+
negative_prompt_embeds=None,
|
| 416 |
+
target_res=None,
|
| 417 |
+
):
|
| 418 |
+
if (callback_steps is None) or (
|
| 419 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 420 |
+
):
|
| 421 |
+
raise ValueError(
|
| 422 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 423 |
+
f" {type(callback_steps)}."
|
| 424 |
+
)
|
| 425 |
+
|
| 426 |
+
if prompt is not None and prompt_embeds is not None:
|
| 427 |
+
raise ValueError(
|
| 428 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 429 |
+
" only forward one of the two."
|
| 430 |
+
)
|
| 431 |
+
elif prompt is None and prompt_embeds is None:
|
| 432 |
+
raise ValueError(
|
| 433 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 434 |
+
)
|
| 435 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 436 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 437 |
+
|
| 438 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 439 |
+
raise ValueError(
|
| 440 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 441 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 442 |
+
)
|
| 443 |
+
|
| 444 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 445 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 446 |
+
raise ValueError(
|
| 447 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 448 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 449 |
+
f" {negative_prompt_embeds.shape}."
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
if (
|
| 453 |
+
not isinstance(image, torch.Tensor)
|
| 454 |
+
and not isinstance(image, PIL.Image.Image)
|
| 455 |
+
and not isinstance(image, np.ndarray)
|
| 456 |
+
and not isinstance(image, list)
|
| 457 |
+
):
|
| 458 |
+
raise ValueError(
|
| 459 |
+
f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}"
|
| 460 |
+
)
|
| 461 |
+
|
| 462 |
+
# verify that the batch sizes of prompt and image match if image is a list, tensor, or numpy array
|
| 463 |
+
if isinstance(image, list) or isinstance(image, torch.Tensor) or isinstance(image, np.ndarray):
|
| 464 |
+
if prompt is not None and isinstance(prompt, str):
|
| 465 |
+
batch_size = 1
|
| 466 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 467 |
+
batch_size = len(prompt)
|
| 468 |
+
else:
|
| 469 |
+
batch_size = prompt_embeds.shape[0]
|
| 470 |
+
|
| 471 |
+
if isinstance(image, list):
|
| 472 |
+
image_batch_size = len(image)
|
| 473 |
+
else:
|
| 474 |
+
image_batch_size = image.shape[0]
|
| 475 |
+
if batch_size != image_batch_size:
|
| 476 |
+
raise ValueError(
|
| 477 |
+
f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
|
| 478 |
+
" Please make sure that passed `prompt` matches the batch size of `image`."
|
| 479 |
+
)
|
| 480 |
+
|
| 481 |
+
# check noise level
|
| 482 |
+
if noise_level > self.config.max_noise_level:
|
| 483 |
+
raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")
|
| 484 |
+
|
| 485 |
+
if (callback_steps is None) or (
|
| 486 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 487 |
+
):
|
| 488 |
+
raise ValueError(
|
| 489 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 490 |
+
f" {type(callback_steps)}."
|
| 491 |
+
)
|
| 492 |
+
|
| 493 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 494 |
+
shape = (batch_size, num_channels_latents, height, width)
|
| 495 |
+
if latents is None:
|
| 496 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 497 |
+
else:
|
| 498 |
+
if latents.shape != shape:
|
| 499 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
| 500 |
+
latents = latents.to(device)
|
| 501 |
+
|
| 502 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 503 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 504 |
+
return latents
|
| 505 |
+
|
| 506 |
+
# def upcast_vae(self):
|
| 507 |
+
# dtype = self.vae.dtype
|
| 508 |
+
# self.vae.to(dtype=torch.float32)
|
| 509 |
+
# use_torch_2_0_or_xformers = isinstance(
|
| 510 |
+
# self.vae.decoder.mid_block.attentions[0].processor,
|
| 511 |
+
# (
|
| 512 |
+
# AttnProcessor2_0,
|
| 513 |
+
# XFormersAttnProcessor,
|
| 514 |
+
# LoRAXFormersAttnProcessor,
|
| 515 |
+
# LoRAAttnProcessor2_0,
|
| 516 |
+
# ),
|
| 517 |
+
# )
|
| 518 |
+
# # if xformers or torch_2_0 is used attention block does not need
|
| 519 |
+
# # to be in float32 which can save lots of memory
|
| 520 |
+
# if use_torch_2_0_or_xformers:
|
| 521 |
+
# self.vae.post_quant_conv.to(dtype)
|
| 522 |
+
# self.vae.decoder.conv_in.to(dtype)
|
| 523 |
+
# self.vae.decoder.mid_block.to(dtype)
|
| 524 |
+
|
| 525 |
+
@torch.no_grad()
|
| 526 |
+
def __call__(
|
| 527 |
+
self,
|
| 528 |
+
prompt: Union[str, List[str]] = None,
|
| 529 |
+
rgb: PipelineImageInput = None,
|
| 530 |
+
depth: PipelineDepthInput = None,
|
| 531 |
+
num_inference_steps: int = 75,
|
| 532 |
+
guidance_scale: float = 9.0,
|
| 533 |
+
noise_level: int = 20,
|
| 534 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 535 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 536 |
+
eta: float = 0.0,
|
| 537 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 538 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 539 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 540 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 541 |
+
output_type: Optional[str] = "pil",
|
| 542 |
+
return_dict: bool = True,
|
| 543 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 544 |
+
callback_steps: int = 1,
|
| 545 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 546 |
+
target_res: Optional[List[int]] = [1024, 1024],
|
| 547 |
+
):
|
| 548 |
+
r"""
|
| 549 |
+
The call function to the pipeline for generation.
|
| 550 |
+
|
| 551 |
+
Args:
|
| 552 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 553 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 554 |
+
rgb (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
|
| 555 |
+
`Image` or tensor representing the low-resolution RGB image batch to be upscaled; pass the matching depth map via `depth`.
|
| 556 |
+
num_inference_steps (`int`, *optional*, defaults to 75):
|
| 557 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 558 |
+
expense of slower inference.
|
| 559 |
+
guidance_scale (`float`, *optional*, defaults to 9.0):
|
| 560 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 561 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 562 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 563 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 564 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 565 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 566 |
+
The number of images to generate per prompt.
|
| 567 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 568 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 569 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 570 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 571 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 572 |
+
generation deterministic.
|
| 573 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 574 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 575 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 576 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 577 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 578 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 579 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 580 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 581 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 582 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 583 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 584 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 585 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 586 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 587 |
+
plain tuple.
|
| 588 |
+
callback (`Callable`, *optional*):
|
| 589 |
+
A function that calls every `callback_steps` steps during inference. The function is called with the
|
| 590 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 591 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 592 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 593 |
+
every step.
|
| 594 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 595 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 596 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 597 |
+
|
| 598 |
+
Examples:
|
| 599 |
+
|
| 600 |
+
Returns:
|
| 601 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 602 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 603 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 604 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 605 |
+
"not-safe-for-work" (nsfw) content.
|
| 606 |
+
"""
|
| 607 |
+
# 1. Check inputs. Raise error if not correct
|
| 608 |
+
self.check_inputs(
|
| 609 |
+
prompt,
|
| 610 |
+
rgb,
|
| 611 |
+
noise_level,
|
| 612 |
+
callback_steps,
|
| 613 |
+
negative_prompt,
|
| 614 |
+
prompt_embeds,
|
| 615 |
+
negative_prompt_embeds,
|
| 616 |
+
)
|
| 617 |
+
# 2. Define call parameters
|
| 618 |
+
if prompt is not None and isinstance(prompt, str):
|
| 619 |
+
batch_size = 1
|
| 620 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 621 |
+
batch_size = len(prompt)
|
| 622 |
+
else:
|
| 623 |
+
batch_size = prompt_embeds.shape[0]
|
| 624 |
+
|
| 625 |
+
device = self._execution_device
|
| 626 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
| 627 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 628 |
+
# corresponds to doing no classifier free guidance.
|
| 629 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 630 |
+
|
| 631 |
+
# 3. Encode input prompt
|
| 632 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 633 |
+
prompt,
|
| 634 |
+
device,
|
| 635 |
+
num_images_per_prompt,
|
| 636 |
+
do_classifier_free_guidance,
|
| 637 |
+
negative_prompt,
|
| 638 |
+
prompt_embeds=prompt_embeds,
|
| 639 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 640 |
+
)
|
| 641 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 642 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 643 |
+
# to avoid doing two forward passes
|
| 644 |
+
if do_classifier_free_guidance:
|
| 645 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 646 |
+
|
| 647 |
+
# 4. Preprocess image
|
| 648 |
+
rgb, depth = self.image_processor.preprocess(rgb, depth, target_res=target_res)
|
| 649 |
+
rgb = rgb.to(dtype=prompt_embeds.dtype, device=device)
|
| 650 |
+
depth = depth.to(dtype=prompt_embeds.dtype, device=device)
|
| 651 |
+
|
| 652 |
+
# 5. set timesteps
|
| 653 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 654 |
+
timesteps = self.scheduler.timesteps
|
| 655 |
+
|
| 656 |
+
# 6. Encode low-resolution image to latent space
|
| 657 |
+
image = torch.cat([rgb, depth], axis=1)
|
| 658 |
+
latent_space_image = self.vae.encode(image).latent_dist.sample(generator)
|
| 659 |
+
latent_space_image *= self.vae.scaling_factor
|
| 660 |
+
noise_level = torch.tensor([noise_level], dtype=torch.long, device=device)
|
| 661 |
+
# noise_rgb = randn_tensor(rgb.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
|
| 662 |
+
# rgb = self.low_res_scheduler.add_noise(rgb, noise_rgb, noise_level)
|
| 663 |
+
# noise_depth = randn_tensor(depth.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
|
| 664 |
+
# depth = self.low_res_scheduler.add_noise(depth, noise_depth, noise_level)
|
| 665 |
+
|
| 666 |
+
batch_multiplier = 2 if do_classifier_free_guidance else 1
|
| 667 |
+
latent_space_image = torch.cat([latent_space_image] * batch_multiplier * num_images_per_prompt)
|
| 668 |
+
noise_level = torch.cat([noise_level] * latent_space_image.shape[0])
|
| 669 |
+
|
| 670 |
+
# 7. Prepare latent variables
|
| 671 |
+
height, width = latent_space_image.shape[2:]
|
| 672 |
+
num_channels_latents = self.vae.config.latent_channels
|
| 673 |
+
|
| 674 |
+
latents = self.prepare_latents(
|
| 675 |
+
batch_size * num_images_per_prompt,
|
| 676 |
+
num_channels_latents,
|
| 677 |
+
height,
|
| 678 |
+
width,
|
| 679 |
+
prompt_embeds.dtype,
|
| 680 |
+
device,
|
| 681 |
+
generator,
|
| 682 |
+
latents,
|
| 683 |
+
)
|
| 684 |
+
|
| 685 |
+
# 8. Check that sizes of image and latents match
|
| 686 |
+
num_channels_image = latent_space_image.shape[1]
|
| 687 |
+
if num_channels_latents + num_channels_image != self.unet.config.in_channels:
|
| 688 |
+
raise ValueError(
|
| 689 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 690 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 691 |
+
f" `num_channels_image`: {num_channels_image} "
|
| 692 |
+
f" = {num_channels_latents+num_channels_image}. Please verify the config of"
|
| 693 |
+
" `pipeline.unet` or your `image` input."
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 697 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 698 |
+
|
| 699 |
+
# 10. Denoising loop
|
| 700 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 701 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 702 |
+
for i, t in enumerate(timesteps):
|
| 703 |
+
# expand the latents if we are doing classifier free guidance
|
| 704 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 705 |
+
|
| 706 |
+
# concat latents, mask, masked_image_latents in the channel dimension
|
| 707 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 708 |
+
latent_model_input = torch.cat([latent_model_input, latent_space_image], dim=1)
|
| 709 |
+
|
| 710 |
+
# predict the noise residual
|
| 711 |
+
noise_pred = self.unet(
|
| 712 |
+
latent_model_input,
|
| 713 |
+
t,
|
| 714 |
+
encoder_hidden_states=prompt_embeds,
|
| 715 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 716 |
+
class_labels=noise_level,
|
| 717 |
+
return_dict=False,
|
| 718 |
+
)[0]
|
| 719 |
+
|
| 720 |
+
# perform guidance
|
| 721 |
+
if do_classifier_free_guidance:
|
| 722 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 723 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 724 |
+
|
| 725 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 726 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 727 |
+
|
| 728 |
+
# call the callback, if provided
|
| 729 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 730 |
+
progress_bar.update()
|
| 731 |
+
if callback is not None and i % callback_steps == 0:
|
| 732 |
+
callback(i, t, latents)
|
| 733 |
+
|
| 734 |
+
if not output_type == "latent":
|
| 735 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 736 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 737 |
+
|
| 738 |
+
if needs_upcasting:
|
| 739 |
+
self.upcast_vae()
|
| 740 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 741 |
+
|
| 742 |
+
image = self.vae.decode(latents / self.vae.scaling_factor, return_dict=False)[0]
|
| 743 |
+
|
| 744 |
+
# cast back to fp16 if needed
|
| 745 |
+
if needs_upcasting:
|
| 746 |
+
self.vae.to(dtype=torch.float16)
|
| 747 |
+
|
| 748 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 749 |
+
|
| 750 |
+
else:
|
| 751 |
+
image = latents
|
| 752 |
+
has_nsfw_concept = None
|
| 753 |
+
|
| 754 |
+
if has_nsfw_concept is None:
|
| 755 |
+
do_denormalize = [True] * image.shape[0]
|
| 756 |
+
else:
|
| 757 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 758 |
+
|
| 759 |
+
rgb, depth = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 760 |
+
|
| 761 |
+
# 11. Apply watermark
|
| 762 |
+
if output_type == "pil" and self.watermarker is not None:
|
| 763 |
+
rgb = self.watermarker.apply_watermark(rgb)
|
| 764 |
+
|
| 765 |
+
# Offload last model to CPU
|
| 766 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 767 |
+
self.final_offload_hook.offload()
|
| 768 |
+
|
| 769 |
+
if not return_dict:
|
| 770 |
+
return ((rgb, depth), has_nsfw_concept)
|
| 771 |
+
|
| 772 |
+
return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept)
|
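A minimal, hypothetical usage sketch of the LDM3D upscaling pipeline whose denoising loop and output handling end above. The checkpoint id, the `custom_pipeline` reference, the keyword names `rgb`/`depth` and the placeholder low-resolution inputs are assumptions for illustration; only the `rgb`/`depth` fields of the returned `LDM3DPipelineOutput` are taken from the code itself.

import torch
from diffusers import DiffusionPipeline

# Hypothetical loading call -- the checkpoint id and custom_pipeline reference are assumptions.
pipe = DiffusionPipeline.from_pretrained(
    "Intel/ldm3d-sr",
    custom_pipeline="pipeline_stable_diffusion_upscale_ldm3d",
    torch_dtype=torch.float16,
).to("cuda")

# low_res_rgb / low_res_depth stand in for the low-resolution RGB image and depth map
# produced by a base LDM3D pipeline; the keyword names are assumptions.
output = pipe(
    prompt="high quality high resolution uhd 4k image",
    rgb=low_res_rgb,
    depth=low_res_depth,
    num_inference_steps=50,
)
upscaled_rgb, upscaled_depth = output.rgb, output.depth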
v0.27.0/pipeline_stable_diffusion_xl_controlnet_adapter.py
ADDED
|
@@ -0,0 +1,1406 @@
| 1 |
+
# Copyright 2024 TencentARC and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
import inspect
|
| 17 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import PIL.Image
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
| 24 |
+
|
| 25 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 26 |
+
from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
|
| 27 |
+
from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel
|
| 28 |
+
from diffusers.models.attention_processor import (
|
| 29 |
+
AttnProcessor2_0,
|
| 30 |
+
LoRAAttnProcessor2_0,
|
| 31 |
+
LoRAXFormersAttnProcessor,
|
| 32 |
+
XFormersAttnProcessor,
|
| 33 |
+
)
|
| 34 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 35 |
+
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
| 36 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 37 |
+
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
|
| 38 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 39 |
+
from diffusers.utils import (
|
| 40 |
+
PIL_INTERPOLATION,
|
| 41 |
+
USE_PEFT_BACKEND,
|
| 42 |
+
logging,
|
| 43 |
+
replace_example_docstring,
|
| 44 |
+
scale_lora_layers,
|
| 45 |
+
unscale_lora_layers,
|
| 46 |
+
)
|
| 47 |
+
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 51 |
+
|
| 52 |
+
EXAMPLE_DOC_STRING = """
|
| 53 |
+
Examples:
|
| 54 |
+
```py
|
| 55 |
+
>>> import torch
|
| 56 |
+
>>> from diffusers import ControlNetModel, T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler
|
| 57 |
+
>>> from diffusers.utils import load_image
|
| 58 |
+
>>> from controlnet_aux.midas import MidasDetector
|
| 59 |
+
|
| 60 |
+
>>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
| 61 |
+
>>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
| 62 |
+
|
| 63 |
+
>>> image = load_image(img_url).resize((1024, 1024))
|
| 64 |
+
>>> mask_image = load_image(mask_url).resize((1024, 1024))
|
| 65 |
+
|
| 66 |
+
>>> midas_depth = MidasDetector.from_pretrained(
|
| 67 |
+
... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
|
| 68 |
+
... ).to("cuda")
|
| 69 |
+
|
| 70 |
+
>>> depth_image = midas_depth(
|
| 71 |
+
... image, detect_resolution=512, image_resolution=1024
|
| 72 |
+
... )
|
| 73 |
+
|
| 74 |
+
>>> model_id = "stabilityai/stable-diffusion-xl-base-1.0"
|
| 75 |
+
|
| 76 |
+
>>> adapter = T2IAdapter.from_pretrained(
|
| 77 |
+
... "Adapter/t2iadapter",
|
| 78 |
+
... subfolder="sketch_sdxl_1.0",
|
| 79 |
+
... torch_dtype=torch.float16,
|
| 80 |
+
... adapter_type="full_adapter_xl",
|
| 81 |
+
... )
|
| 82 |
+
|
| 83 |
+
>>> controlnet = ControlNetModel.from_pretrained(
|
| 84 |
+
... "diffusers/controlnet-depth-sdxl-1.0",
|
| 85 |
+
... torch_dtype=torch.float16,
|
| 86 |
+
... variant="fp16",
|
| 87 |
+
... use_safetensors=True
|
| 88 |
+
... ).to("cuda")
|
| 89 |
+
|
| 90 |
+
>>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")
|
| 91 |
+
|
| 92 |
+
>>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
|
| 93 |
+
... model_id,
|
| 94 |
+
... adapter=adapter,
|
| 95 |
+
... controlnet=controlnet,
|
| 96 |
+
... torch_dtype=torch.float16,
|
| 97 |
+
... variant="fp16",
|
| 98 |
+
... scheduler=scheduler
|
| 99 |
+
... ).to("cuda")
|
| 100 |
+
|
| 101 |
+
>>> strength = 0.5
|
| 102 |
+
|
| 103 |
+
>>> generator = torch.manual_seed(42)
|
| 104 |
+
>>> sketch_image_out = pipe(
|
| 105 |
+
... prompt="a photo of a tiger sitting on a park bench",
|
| 106 |
+
... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
|
| 107 |
+
... adapter_image=depth_image,
|
| 108 |
+
... control_image=mask_image,
|
| 109 |
+
... adapter_conditioning_scale=strength,
|
| 110 |
+
... controlnet_conditioning_scale=strength,
|
| 111 |
+
... generator=generator,
|
| 112 |
+
... guidance_scale=7.5,
|
| 113 |
+
... ).images[0]
|
| 114 |
+
```
|
| 115 |
+
"""
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def _preprocess_adapter_image(image, height, width):
|
| 119 |
+
if isinstance(image, torch.Tensor):
|
| 120 |
+
return image
|
| 121 |
+
elif isinstance(image, PIL.Image.Image):
|
| 122 |
+
image = [image]
|
| 123 |
+
|
| 124 |
+
if isinstance(image[0], PIL.Image.Image):
|
| 125 |
+
image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
|
| 126 |
+
image = [
|
| 127 |
+
i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
|
| 128 |
+
] # expand [h, w] or [h, w, c] to [b, h, w, c]
|
| 129 |
+
image = np.concatenate(image, axis=0)
|
| 130 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 131 |
+
image = image.transpose(0, 3, 1, 2)
|
| 132 |
+
image = torch.from_numpy(image)
|
| 133 |
+
elif isinstance(image[0], torch.Tensor):
|
| 134 |
+
if image[0].ndim == 3:
|
| 135 |
+
image = torch.stack(image, dim=0)
|
| 136 |
+
elif image[0].ndim == 4:
|
| 137 |
+
image = torch.cat(image, dim=0)
|
| 138 |
+
else:
|
| 139 |
+
raise ValueError(
|
| 140 |
+
f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
|
| 141 |
+
)
|
| 142 |
+
return image
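# Illustrative note (not part of the original helper): PIL inputs are resized to (width, height),
# rescaled to [0, 1] and returned as a float32 tensor of shape (batch, channels, height, width);
# tensor inputs are stacked/concatenated (or, for a single tensor, returned unchanged), so the
# caller keeps responsibility for their value range and layout.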
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
|
| 146 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 147 |
+
"""
|
| 148 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 149 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 150 |
+
"""
|
| 151 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 152 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 153 |
+
# rescale the results from guidance (fixes overexposure)
|
| 154 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 155 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 156 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 157 |
+
return noise_cfg
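# Illustrative note (not part of the original helper): `guidance_rescale=0.0` returns the CFG
# prediction unchanged, while `guidance_rescale=1.0` fully rescales it to the per-sample std of
# the text-conditional prediction. A typical call inside a denoising loop (0.7 is a commonly
# used value) would be:
#   noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)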
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
class StableDiffusionXLControlNetAdapterPipeline(
|
| 161 |
+
DiffusionPipeline,
|
| 162 |
+
StableDiffusionMixin,
|
| 163 |
+
FromSingleFileMixin,
|
| 164 |
+
StableDiffusionXLLoraLoaderMixin,
|
| 165 |
+
TextualInversionLoaderMixin,
|
| 166 |
+
):
|
| 167 |
+
r"""
|
| 168 |
+
Pipeline for text-to-image generation using Stable Diffusion XL augmented with T2I-Adapter and ControlNet
|
| 169 |
+
https://arxiv.org/abs/2302.08453
|
| 170 |
+
|
| 171 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 172 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 173 |
+
|
| 174 |
+
Args:
|
| 175 |
+
adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
|
| 176 |
+
Provides additional conditioning to the unet during the denoising process. If you set multiple Adapters as a
|
| 177 |
+
list, the outputs from each Adapter are added together to create one combined additional conditioning.
|
| 178 |
+
adapter_weights (`List[float]`, *optional*, defaults to None):
|
| 179 |
+
List of floats representing the weight which will be multiplied with each adapter's output before adding them
|
| 180 |
+
together.
|
| 181 |
+
vae ([`AutoencoderKL`]):
|
| 182 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 183 |
+
text_encoder ([`CLIPTextModel`]):
|
| 184 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 185 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 186 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 187 |
+
tokenizer (`CLIPTokenizer`):
|
| 188 |
+
Tokenizer of class
|
| 189 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 190 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 191 |
+
scheduler ([`SchedulerMixin`]):
|
| 192 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 193 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 194 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 195 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 196 |
+
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
| 197 |
+
feature_extractor ([`CLIPFeatureExtractor`]):
|
| 198 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 199 |
+
"""
|
| 200 |
+
|
| 201 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
|
| 202 |
+
_optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"]
|
| 203 |
+
|
| 204 |
+
def __init__(
|
| 205 |
+
self,
|
| 206 |
+
vae: AutoencoderKL,
|
| 207 |
+
text_encoder: CLIPTextModel,
|
| 208 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
| 209 |
+
tokenizer: CLIPTokenizer,
|
| 210 |
+
tokenizer_2: CLIPTokenizer,
|
| 211 |
+
unet: UNet2DConditionModel,
|
| 212 |
+
adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]],
|
| 213 |
+
controlnet: Union[ControlNetModel, MultiControlNetModel],
|
| 214 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 215 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 216 |
+
):
|
| 217 |
+
super().__init__()
|
| 218 |
+
|
| 219 |
+
if isinstance(controlnet, (list, tuple)):
|
| 220 |
+
controlnet = MultiControlNetModel(controlnet)
|
| 221 |
+
|
| 222 |
+
self.register_modules(
|
| 223 |
+
vae=vae,
|
| 224 |
+
text_encoder=text_encoder,
|
| 225 |
+
text_encoder_2=text_encoder_2,
|
| 226 |
+
tokenizer=tokenizer,
|
| 227 |
+
tokenizer_2=tokenizer_2,
|
| 228 |
+
unet=unet,
|
| 229 |
+
adapter=adapter,
|
| 230 |
+
controlnet=controlnet,
|
| 231 |
+
scheduler=scheduler,
|
| 232 |
+
)
|
| 233 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 234 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 235 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 236 |
+
self.control_image_processor = VaeImageProcessor(
|
| 237 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 238 |
+
)
|
| 239 |
+
self.default_sample_size = self.unet.config.sample_size
|
| 240 |
+
|
| 241 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
|
| 242 |
+
def encode_prompt(
|
| 243 |
+
self,
|
| 244 |
+
prompt: str,
|
| 245 |
+
prompt_2: Optional[str] = None,
|
| 246 |
+
device: Optional[torch.device] = None,
|
| 247 |
+
num_images_per_prompt: int = 1,
|
| 248 |
+
do_classifier_free_guidance: bool = True,
|
| 249 |
+
negative_prompt: Optional[str] = None,
|
| 250 |
+
negative_prompt_2: Optional[str] = None,
|
| 251 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 252 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 253 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 254 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 255 |
+
lora_scale: Optional[float] = None,
|
| 256 |
+
clip_skip: Optional[int] = None,
|
| 257 |
+
):
|
| 258 |
+
r"""
|
| 259 |
+
Encodes the prompt into text encoder hidden states.
|
| 260 |
+
|
| 261 |
+
Args:
|
| 262 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 263 |
+
prompt to be encoded
|
| 264 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 265 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 266 |
+
used in both text-encoders
|
| 267 |
+
device: (`torch.device`):
|
| 268 |
+
torch device
|
| 269 |
+
num_images_per_prompt (`int`):
|
| 270 |
+
number of images that should be generated per prompt
|
| 271 |
+
do_classifier_free_guidance (`bool`):
|
| 272 |
+
whether to use classifier free guidance or not
|
| 273 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 274 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 275 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 276 |
+
less than `1`).
|
| 277 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 278 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 279 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 280 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 281 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 282 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 283 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 284 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 285 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 286 |
+
argument.
|
| 287 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 288 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 289 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 290 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 291 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 292 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 293 |
+
input argument.
|
| 294 |
+
lora_scale (`float`, *optional*):
|
| 295 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 296 |
+
clip_skip (`int`, *optional*):
|
| 297 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 298 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 299 |
+
"""
|
| 300 |
+
device = device or self._execution_device
|
| 301 |
+
|
| 302 |
+
# set lora scale so that monkey patched LoRA
|
| 303 |
+
# function of text encoder can correctly access it
|
| 304 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
| 305 |
+
self._lora_scale = lora_scale
|
| 306 |
+
|
| 307 |
+
# dynamically adjust the LoRA scale
|
| 308 |
+
if self.text_encoder is not None:
|
| 309 |
+
if not USE_PEFT_BACKEND:
|
| 310 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 311 |
+
else:
|
| 312 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 313 |
+
|
| 314 |
+
if self.text_encoder_2 is not None:
|
| 315 |
+
if not USE_PEFT_BACKEND:
|
| 316 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 317 |
+
else:
|
| 318 |
+
scale_lora_layers(self.text_encoder_2, lora_scale)
|
| 319 |
+
|
| 320 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 321 |
+
|
| 322 |
+
if prompt is not None:
|
| 323 |
+
batch_size = len(prompt)
|
| 324 |
+
else:
|
| 325 |
+
batch_size = prompt_embeds.shape[0]
|
| 326 |
+
|
| 327 |
+
# Define tokenizers and text encoders
|
| 328 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 329 |
+
text_encoders = (
|
| 330 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
if prompt_embeds is None:
|
| 334 |
+
prompt_2 = prompt_2 or prompt
|
| 335 |
+
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
|
| 336 |
+
|
| 337 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 338 |
+
prompt_embeds_list = []
|
| 339 |
+
prompts = [prompt, prompt_2]
|
| 340 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 341 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 342 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 343 |
+
|
| 344 |
+
text_inputs = tokenizer(
|
| 345 |
+
prompt,
|
| 346 |
+
padding="max_length",
|
| 347 |
+
max_length=tokenizer.model_max_length,
|
| 348 |
+
truncation=True,
|
| 349 |
+
return_tensors="pt",
|
| 350 |
+
)
|
| 351 |
+
|
| 352 |
+
text_input_ids = text_inputs.input_ids
|
| 353 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 354 |
+
|
| 355 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 356 |
+
text_input_ids, untruncated_ids
|
| 357 |
+
):
|
| 358 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 359 |
+
logger.warning(
|
| 360 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 361 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 362 |
+
)
|
| 363 |
+
|
| 364 |
+
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
|
| 365 |
+
|
| 366 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 367 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 368 |
+
if clip_skip is None:
|
| 369 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 370 |
+
else:
|
| 371 |
+
# "2" because SDXL always indexes from the penultimate layer.
|
| 372 |
+
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
|
| 373 |
+
|
| 374 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 375 |
+
|
| 376 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 377 |
+
|
| 378 |
+
# get unconditional embeddings for classifier free guidance
|
| 379 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 380 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 381 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 382 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 383 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 384 |
+
negative_prompt = negative_prompt or ""
|
| 385 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 386 |
+
|
| 387 |
+
# normalize str to list
|
| 388 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 389 |
+
negative_prompt_2 = (
|
| 390 |
+
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
|
| 391 |
+
)
|
| 392 |
+
|
| 393 |
+
uncond_tokens: List[str]
|
| 394 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 395 |
+
raise TypeError(
|
| 396 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 397 |
+
f" {type(prompt)}."
|
| 398 |
+
)
|
| 399 |
+
elif batch_size != len(negative_prompt):
|
| 400 |
+
raise ValueError(
|
| 401 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 402 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 403 |
+
" the batch size of `prompt`."
|
| 404 |
+
)
|
| 405 |
+
else:
|
| 406 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 407 |
+
|
| 408 |
+
negative_prompt_embeds_list = []
|
| 409 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 410 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 411 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 412 |
+
|
| 413 |
+
max_length = prompt_embeds.shape[1]
|
| 414 |
+
uncond_input = tokenizer(
|
| 415 |
+
negative_prompt,
|
| 416 |
+
padding="max_length",
|
| 417 |
+
max_length=max_length,
|
| 418 |
+
truncation=True,
|
| 419 |
+
return_tensors="pt",
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
negative_prompt_embeds = text_encoder(
|
| 423 |
+
uncond_input.input_ids.to(device),
|
| 424 |
+
output_hidden_states=True,
|
| 425 |
+
)
|
| 426 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 427 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 428 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 429 |
+
|
| 430 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 431 |
+
|
| 432 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 433 |
+
|
| 434 |
+
if self.text_encoder_2 is not None:
|
| 435 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 436 |
+
else:
|
| 437 |
+
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 438 |
+
|
| 439 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 440 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 441 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 442 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 443 |
+
|
| 444 |
+
if do_classifier_free_guidance:
|
| 445 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 446 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 447 |
+
|
| 448 |
+
if self.text_encoder_2 is not None:
|
| 449 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 450 |
+
else:
|
| 451 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 452 |
+
|
| 453 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 454 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 455 |
+
|
| 456 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 457 |
+
bs_embed * num_images_per_prompt, -1
|
| 458 |
+
)
|
| 459 |
+
if do_classifier_free_guidance:
|
| 460 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 461 |
+
bs_embed * num_images_per_prompt, -1
|
| 462 |
+
)
|
| 463 |
+
|
| 464 |
+
if self.text_encoder is not None:
|
| 465 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 466 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 467 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 468 |
+
|
| 469 |
+
if self.text_encoder_2 is not None:
|
| 470 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 471 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 472 |
+
unscale_lora_layers(self.text_encoder_2, lora_scale)
|
| 473 |
+
|
| 474 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 475 |
+
|
| 476 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 477 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 478 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 479 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 480 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 481 |
+
# and should be between [0, 1]
|
| 482 |
+
|
| 483 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 484 |
+
extra_step_kwargs = {}
|
| 485 |
+
if accepts_eta:
|
| 486 |
+
extra_step_kwargs["eta"] = eta
|
| 487 |
+
|
| 488 |
+
# check if the scheduler accepts generator
|
| 489 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 490 |
+
if accepts_generator:
|
| 491 |
+
extra_step_kwargs["generator"] = generator
|
| 492 |
+
return extra_step_kwargs
|
| 493 |
+
|
| 494 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
|
| 495 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 496 |
+
image_is_pil = isinstance(image, PIL.Image.Image)
|
| 497 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 498 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 499 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
| 500 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 501 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 502 |
+
|
| 503 |
+
if (
|
| 504 |
+
not image_is_pil
|
| 505 |
+
and not image_is_tensor
|
| 506 |
+
and not image_is_np
|
| 507 |
+
and not image_is_pil_list
|
| 508 |
+
and not image_is_tensor_list
|
| 509 |
+
and not image_is_np_list
|
| 510 |
+
):
|
| 511 |
+
raise TypeError(
|
| 512 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 513 |
+
)
|
| 514 |
+
|
| 515 |
+
if image_is_pil:
|
| 516 |
+
image_batch_size = 1
|
| 517 |
+
else:
|
| 518 |
+
image_batch_size = len(image)
|
| 519 |
+
|
| 520 |
+
if prompt is not None and isinstance(prompt, str):
|
| 521 |
+
prompt_batch_size = 1
|
| 522 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 523 |
+
prompt_batch_size = len(prompt)
|
| 524 |
+
elif prompt_embeds is not None:
|
| 525 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 526 |
+
|
| 527 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 528 |
+
raise ValueError(
|
| 529 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
|
| 533 |
+
def check_inputs(
|
| 534 |
+
self,
|
| 535 |
+
prompt,
|
| 536 |
+
prompt_2,
|
| 537 |
+
height,
|
| 538 |
+
width,
|
| 539 |
+
callback_steps,
|
| 540 |
+
negative_prompt=None,
|
| 541 |
+
negative_prompt_2=None,
|
| 542 |
+
prompt_embeds=None,
|
| 543 |
+
negative_prompt_embeds=None,
|
| 544 |
+
pooled_prompt_embeds=None,
|
| 545 |
+
negative_pooled_prompt_embeds=None,
|
| 546 |
+
callback_on_step_end_tensor_inputs=None,
|
| 547 |
+
):
|
| 548 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 549 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 550 |
+
|
| 551 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 552 |
+
raise ValueError(
|
| 553 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 554 |
+
f" {type(callback_steps)}."
|
| 555 |
+
)
|
| 556 |
+
|
| 557 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 558 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 559 |
+
):
|
| 560 |
+
raise ValueError(
|
| 561 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
if prompt is not None and prompt_embeds is not None:
|
| 565 |
+
raise ValueError(
|
| 566 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 567 |
+
" only forward one of the two."
|
| 568 |
+
)
|
| 569 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
| 570 |
+
raise ValueError(
|
| 571 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 572 |
+
" only forward one of the two."
|
| 573 |
+
)
|
| 574 |
+
elif prompt is None and prompt_embeds is None:
|
| 575 |
+
raise ValueError(
|
| 576 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 577 |
+
)
|
| 578 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 579 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 580 |
+
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
| 581 |
+
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
| 582 |
+
|
| 583 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 584 |
+
raise ValueError(
|
| 585 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 586 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 587 |
+
)
|
| 588 |
+
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
| 589 |
+
raise ValueError(
|
| 590 |
+
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
| 591 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 592 |
+
)
|
| 593 |
+
|
| 594 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 595 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 596 |
+
raise ValueError(
|
| 597 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 598 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 599 |
+
f" {negative_prompt_embeds.shape}."
|
| 600 |
+
)
|
| 601 |
+
|
| 602 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 603 |
+
raise ValueError(
|
| 604 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 605 |
+
)
|
| 606 |
+
|
| 607 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 608 |
+
raise ValueError(
|
| 609 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 610 |
+
)
|
| 611 |
+
|
| 612 |
+
def check_conditions(
|
| 613 |
+
self,
|
| 614 |
+
prompt,
|
| 615 |
+
prompt_embeds,
|
| 616 |
+
adapter_image,
|
| 617 |
+
control_image,
|
| 618 |
+
adapter_conditioning_scale,
|
| 619 |
+
controlnet_conditioning_scale,
|
| 620 |
+
control_guidance_start,
|
| 621 |
+
control_guidance_end,
|
| 622 |
+
):
|
| 623 |
+
# controlnet checks
|
| 624 |
+
if not isinstance(control_guidance_start, (tuple, list)):
|
| 625 |
+
control_guidance_start = [control_guidance_start]
|
| 626 |
+
|
| 627 |
+
if not isinstance(control_guidance_end, (tuple, list)):
|
| 628 |
+
control_guidance_end = [control_guidance_end]
|
| 629 |
+
|
| 630 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 631 |
+
raise ValueError(
|
| 632 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 633 |
+
)
|
| 634 |
+
|
| 635 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 636 |
+
if len(control_guidance_start) != len(self.controlnet.nets):
|
| 637 |
+
raise ValueError(
|
| 638 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
| 639 |
+
)
|
| 640 |
+
|
| 641 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 642 |
+
if start >= end:
|
| 643 |
+
raise ValueError(
|
| 644 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 645 |
+
)
|
| 646 |
+
if start < 0.0:
|
| 647 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 648 |
+
if end > 1.0:
|
| 649 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 650 |
+
|
| 651 |
+
# Check controlnet `image`
|
| 652 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 653 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 654 |
+
)
|
| 655 |
+
if (
|
| 656 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 657 |
+
or is_compiled
|
| 658 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 659 |
+
):
|
| 660 |
+
self.check_image(control_image, prompt, prompt_embeds)
|
| 661 |
+
elif (
|
| 662 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 663 |
+
or is_compiled
|
| 664 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 665 |
+
):
|
| 666 |
+
if not isinstance(control_image, list):
|
| 667 |
+
raise TypeError("For multiple controlnets: `control_image` must be type `list`")
|
| 668 |
+
|
| 669 |
+
# When `image` is a nested list:
|
| 670 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 671 |
+
elif any(isinstance(i, list) for i in control_image):
|
| 672 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 673 |
+
elif len(control_image) != len(self.controlnet.nets):
|
| 674 |
+
raise ValueError(
|
| 675 |
+
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
|
| 676 |
+
)
|
| 677 |
+
|
| 678 |
+
for image_ in control_image:
|
| 679 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 680 |
+
else:
|
| 681 |
+
assert False
|
| 682 |
+
|
| 683 |
+
# Check `controlnet_conditioning_scale`
|
| 684 |
+
if (
|
| 685 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 686 |
+
or is_compiled
|
| 687 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 688 |
+
):
|
| 689 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 690 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 691 |
+
elif (
|
| 692 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 693 |
+
or is_compiled
|
| 694 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 695 |
+
):
|
| 696 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 697 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 698 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 699 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 700 |
+
self.controlnet.nets
|
| 701 |
+
):
|
| 702 |
+
raise ValueError(
|
| 703 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 704 |
+
" the same length as the number of controlnets"
|
| 705 |
+
)
|
| 706 |
+
else:
|
| 707 |
+
assert False
|
| 708 |
+
|
| 709 |
+
# adapter checks
|
| 710 |
+
if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
|
| 711 |
+
self.check_image(adapter_image, prompt, prompt_embeds)
|
| 712 |
+
elif (
|
| 713 |
+
isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
|
| 714 |
+
):
|
| 715 |
+
if not isinstance(adapter_image, list):
|
| 716 |
+
raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
|
| 717 |
+
|
| 718 |
+
# When `image` is a nested list:
|
| 719 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 720 |
+
elif any(isinstance(i, list) for i in adapter_image):
|
| 721 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 722 |
+
elif len(adapter_image) != len(self.adapter.adapters):
|
| 723 |
+
raise ValueError(
|
| 724 |
+
f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapters.nets)} Adapters."
|
| 725 |
+
)
|
| 726 |
+
|
| 727 |
+
for image_ in adapter_image:
|
| 728 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 729 |
+
else:
|
| 730 |
+
assert False
|
| 731 |
+
|
| 732 |
+
# Check `adapter_conditioning_scale`
|
| 733 |
+
if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
|
| 734 |
+
if not isinstance(adapter_conditioning_scale, float):
|
| 735 |
+
raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
|
| 736 |
+
elif (
|
| 737 |
+
isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
|
| 738 |
+
):
|
| 739 |
+
if isinstance(adapter_conditioning_scale, list):
|
| 740 |
+
if any(isinstance(i, list) for i in adapter_conditioning_scale):
|
| 741 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 742 |
+
elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
|
| 743 |
+
self.adapter.adapters
|
| 744 |
+
):
|
| 745 |
+
raise ValueError(
|
| 746 |
+
"For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
|
| 747 |
+
" the same length as the number of adapters"
|
| 748 |
+
)
|
| 749 |
+
else:
|
| 750 |
+
assert False
|
| 751 |
+
|
| 752 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 753 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 754 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 755 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 756 |
+
raise ValueError(
|
| 757 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 758 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 759 |
+
)
|
| 760 |
+
|
| 761 |
+
if latents is None:
|
| 762 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 763 |
+
else:
|
| 764 |
+
latents = latents.to(device)
|
| 765 |
+
|
| 766 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 767 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 768 |
+
return latents
|
| 769 |
+
|
| 770 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
|
| 771 |
+
def _get_add_time_ids(
|
| 772 |
+
self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
|
| 773 |
+
):
|
| 774 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 775 |
+
|
| 776 |
+
passed_add_embed_dim = (
|
| 777 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
|
| 778 |
+
)
|
| 779 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 780 |
+
|
| 781 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
| 782 |
+
raise ValueError(
|
| 783 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 784 |
+
)
|
| 785 |
+
|
| 786 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 787 |
+
return add_time_ids
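# Illustrative note (not part of the original method): for a default 1024x1024 SDXL generation
# with no cropping, original_size + crops_coords_top_left + target_size is
# (1024, 1024, 0, 0, 1024, 1024), so `add_time_ids` has shape (1, 6) before it is later
# expanded to the batch size.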
|
| 788 |
+
|
| 789 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
| 790 |
+
def upcast_vae(self):
|
| 791 |
+
dtype = self.vae.dtype
|
| 792 |
+
self.vae.to(dtype=torch.float32)
|
| 793 |
+
use_torch_2_0_or_xformers = isinstance(
|
| 794 |
+
self.vae.decoder.mid_block.attentions[0].processor,
|
| 795 |
+
(
|
| 796 |
+
AttnProcessor2_0,
|
| 797 |
+
XFormersAttnProcessor,
|
| 798 |
+
LoRAXFormersAttnProcessor,
|
| 799 |
+
LoRAAttnProcessor2_0,
|
| 800 |
+
),
|
| 801 |
+
)
|
| 802 |
+
# if xformers or torch_2_0 is used attention block does not need
|
| 803 |
+
# to be in float32 which can save lots of memory
|
| 804 |
+
if use_torch_2_0_or_xformers:
|
| 805 |
+
self.vae.post_quant_conv.to(dtype)
|
| 806 |
+
self.vae.decoder.conv_in.to(dtype)
|
| 807 |
+
self.vae.decoder.mid_block.to(dtype)
|
| 808 |
+
|
| 809 |
+
# Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
|
| 810 |
+
def _default_height_width(self, height, width, image):
|
| 811 |
+
# NOTE: It is possible that a list of images have different
|
| 812 |
+
# dimensions for each image, so just checking the first image
|
| 813 |
+
# is not _exactly_ correct, but it is simple.
|
| 814 |
+
while isinstance(image, list):
|
| 815 |
+
image = image[0]
|
| 816 |
+
|
| 817 |
+
if height is None:
|
| 818 |
+
if isinstance(image, PIL.Image.Image):
|
| 819 |
+
height = image.height
|
| 820 |
+
elif isinstance(image, torch.Tensor):
|
| 821 |
+
height = image.shape[-2]
|
| 822 |
+
|
| 823 |
+
# round down to nearest multiple of `self.adapter.downscale_factor`
|
| 824 |
+
height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
|
| 825 |
+
|
| 826 |
+
if width is None:
|
| 827 |
+
if isinstance(image, PIL.Image.Image):
|
| 828 |
+
width = image.width
|
| 829 |
+
elif isinstance(image, torch.Tensor):
|
| 830 |
+
width = image.shape[-1]
|
| 831 |
+
|
| 832 |
+
# round down to nearest multiple of `self.adapter.downscale_factor`
|
| 833 |
+
width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
|
| 834 |
+
|
| 835 |
+
return height, width
|
| 836 |
+
|
| 837 |
+
def prepare_control_image(
|
| 838 |
+
self,
|
| 839 |
+
image,
|
| 840 |
+
width,
|
| 841 |
+
height,
|
| 842 |
+
batch_size,
|
| 843 |
+
num_images_per_prompt,
|
| 844 |
+
device,
|
| 845 |
+
dtype,
|
| 846 |
+
do_classifier_free_guidance=False,
|
| 847 |
+
guess_mode=False,
|
| 848 |
+
):
|
| 849 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 850 |
+
image_batch_size = image.shape[0]
|
| 851 |
+
|
| 852 |
+
if image_batch_size == 1:
|
| 853 |
+
repeat_by = batch_size
|
| 854 |
+
else:
|
| 855 |
+
# image batch size is the same as prompt batch size
|
| 856 |
+
repeat_by = num_images_per_prompt
|
| 857 |
+
|
| 858 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 859 |
+
|
| 860 |
+
image = image.to(device=device, dtype=dtype)
|
| 861 |
+
|
| 862 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 863 |
+
image = torch.cat([image] * 2)
|
| 864 |
+
|
| 865 |
+
return image
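# Illustrative note (not part of the original method): the returned control image is a float
# NCHW tensor whose batch dimension matches the number of samples being denoised; when
# classifier-free guidance is active and `guess_mode` is off, it is duplicated once more along
# dim 0 so it lines up with the [negative, positive] latent batch.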
|
| 866 |
+
|
| 867 |
+
@torch.no_grad()
|
| 868 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 869 |
+
def __call__(
|
| 870 |
+
self,
|
| 871 |
+
prompt: Union[str, List[str]] = None,
|
| 872 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 873 |
+
adapter_image: PipelineImageInput = None,
|
| 874 |
+
control_image: PipelineImageInput = None,
|
| 875 |
+
height: Optional[int] = None,
|
| 876 |
+
width: Optional[int] = None,
|
| 877 |
+
num_inference_steps: int = 50,
|
| 878 |
+
denoising_end: Optional[float] = None,
|
| 879 |
+
guidance_scale: float = 5.0,
|
| 880 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 881 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 882 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 883 |
+
eta: float = 0.0,
|
| 884 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 885 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 886 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 887 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 888 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 889 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 890 |
+
output_type: Optional[str] = "pil",
|
| 891 |
+
return_dict: bool = True,
|
| 892 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 893 |
+
callback_steps: int = 1,
|
| 894 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 895 |
+
guidance_rescale: float = 0.0,
|
| 896 |
+
original_size: Optional[Tuple[int, int]] = None,
|
| 897 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 898 |
+
target_size: Optional[Tuple[int, int]] = None,
|
| 899 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 900 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 901 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 902 |
+
adapter_conditioning_scale: Union[float, List[float]] = 1.0,
|
| 903 |
+
adapter_conditioning_factor: float = 1.0,
|
| 904 |
+
clip_skip: Optional[int] = None,
|
| 905 |
+
controlnet_conditioning_scale=1.0,
|
| 906 |
+
guess_mode: bool = False,
|
| 907 |
+
control_guidance_start: float = 0.0,
|
| 908 |
+
control_guidance_end: float = 1.0,
|
| 909 |
+
):
|
| 910 |
+
r"""
|
| 911 |
+
Function invoked when calling the pipeline for generation.
|
| 912 |
+
|
| 913 |
+
Args:
|
| 914 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 915 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
| 916 |
+
instead.
|
| 917 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 918 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 919 |
+
used in both text-encoders
|
| 920 |
+
adapter_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
|
| 921 |
+
The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
|
| 922 |
+
type is specified as `torch.FloatTensor`, it is passed to Adapter as is. `PIL.Image.Image` can also be
|
| 923 |
+
accepted as an image. The control image is automatically resized to fit the output image.
|
| 924 |
+
control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 925 |
+
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 926 |
+
The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
|
| 927 |
+
specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
|
| 928 |
+
accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
|
| 929 |
+
and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
|
| 930 |
+
`init`, images must be passed as a list such that each element of the list can be correctly batched for
|
| 931 |
+
input to a single ControlNet.
|
| 932 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 933 |
+
The height in pixels of the generated image. Anything below 512 pixels won't work well for
|
| 934 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 935 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 936 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 937 |
+
The width in pixels of the generated image. Anything below 512 pixels won't work well for
|
| 938 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 939 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 940 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 941 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 942 |
+
expense of slower inference.
|
| 943 |
+
denoising_end (`float`, *optional*):
|
| 944 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 945 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 946 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
| 947 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
| 948 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 949 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
|
| 950 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 951 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 952 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 953 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 954 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 955 |
+
usually at the expense of lower image quality.
|
| 956 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 957 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 958 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 959 |
+
less than `1`).
|
| 960 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 961 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 962 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 963 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 964 |
+
The number of images to generate per prompt.
|
| 965 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 966 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 967 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 968 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 969 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 970 |
+
to make generation deterministic.
|
| 971 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 972 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 973 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 974 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 975 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 976 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 977 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 978 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 979 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 980 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 981 |
+
argument.
|
| 982 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 983 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 984 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 985 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 986 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 987 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 988 |
+
input argument.
|
| 989 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 990 |
+
The output format of the generated image. Choose between
|
| 991 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 992 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 993 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`]
|
| 994 |
+
instead of a plain tuple.
|
| 995 |
+
callback (`Callable`, *optional*):
|
| 996 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 997 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 998 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 999 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1000 |
+
called at every step.
|
| 1001 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1002 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1003 |
+
`self.processor` in
|
| 1004 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1005 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 1006 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 1007 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
|
| 1008 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
| 1009 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 1010 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1011 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 1012 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 1013 |
+
explained in section 2.2 of
|
| 1014 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1015 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 1016 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 1017 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 1018 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1019 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1020 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1021 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 1022 |
+
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 1023 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1025 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1026 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 1027 |
+
micro-conditioning as explained in section 2.2 of
|
| 1028 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1029 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1030 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 1031 |
+
To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
|
| 1032 |
+
micro-conditioning as explained in section 2.2 of
|
| 1033 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1034 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1035 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1036 |
+
To negatively condition the generation process based on a target image resolution. It should be the same
|
| 1037 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1038 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1039 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1040 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 1041 |
+
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the
|
| 1042 |
+
residual in the original unet. If multiple ControlNets are specified in `init`, you can set the
|
| 1043 |
+
corresponding scale as a list.
|
| 1044 |
+
adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 1045 |
+
The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
|
| 1046 |
+
residual in the original unet. If multiple adapters are specified in init, you can set the
|
| 1047 |
+
corresponding scale as a list.
|
| 1048 |
+
adapter_conditioning_factor (`float`, *optional*, defaults to 1.0):
|
| 1049 |
+
The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is
|
| 1050 |
+
`0.0`, adapter is not applied at all. If `adapter_conditioning_factor` is `1.0`, adapter is applied for
|
| 1051 |
+
all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps.
|
| 1052 |
+
clip_skip (`int`, *optional*):
|
| 1053 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 1054 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 1055 |
+
|
| 1056 |
+
Examples:
|
| 1057 |
+
|
| 1058 |
+
Returns:
|
| 1059 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
|
| 1060 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
|
| 1061 |
+
`tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 1062 |
+
"""
|
| 1063 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 1064 |
+
adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
|
| 1065 |
+
|
| 1066 |
+
# 0. Default height and width to unet
|
| 1067 |
+
|
| 1068 |
+
height, width = self._default_height_width(height, width, adapter_image)
|
| 1069 |
+
device = self._execution_device
|
| 1070 |
+
|
| 1071 |
+
if isinstance(adapter, MultiAdapter):
|
| 1072 |
+
adapter_input = []
|
| 1073 |
+
|
| 1074 |
+
for one_image in adapter_image:
|
| 1075 |
+
one_image = _preprocess_adapter_image(one_image, height, width)
|
| 1076 |
+
one_image = one_image.to(device=device, dtype=adapter.dtype)
|
| 1077 |
+
adapter_input.append(one_image)
|
| 1078 |
+
else:
|
| 1079 |
+
adapter_input = _preprocess_adapter_image(adapter_image, height, width)
|
| 1080 |
+
adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
|
| 1081 |
+
original_size = original_size or (height, width)
|
| 1082 |
+
target_size = target_size or (height, width)
|
| 1083 |
+
|
| 1084 |
+
# 0.1 align format for control guidance
|
| 1085 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 1086 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 1087 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 1088 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 1089 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 1090 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 1091 |
+
control_guidance_start, control_guidance_end = (
|
| 1092 |
+
mult * [control_guidance_start],
|
| 1093 |
+
mult * [control_guidance_end],
|
| 1094 |
+
)
|
| 1095 |
+
|
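This alignment step just normalizes `control_guidance_start`/`control_guidance_end` so that downstream code can always assume per-ControlNet lists. For instance, with hypothetical scalar inputs and a MultiControlNetModel holding two nets:

```py
# Scalar inputs expanded to per-ControlNet lists (values are illustrative)
control_guidance_start, control_guidance_end = 0.0, 0.8
mult = 2  # len(controlnet.nets) in the multi-ControlNet case

control_guidance_start = mult * [control_guidance_start]  # [0.0, 0.0]
control_guidance_end = mult * [control_guidance_end]      # [0.8, 0.8]
```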
| 1096 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 1097 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 1098 |
+
if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
|
| 1099 |
+
adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.adapters)
|
| 1100 |
+
|
| 1101 |
+
# 1. Check inputs. Raise error if not correct
|
| 1102 |
+
self.check_inputs(
|
| 1103 |
+
prompt,
|
| 1104 |
+
prompt_2,
|
| 1105 |
+
height,
|
| 1106 |
+
width,
|
| 1107 |
+
callback_steps,
|
| 1108 |
+
negative_prompt=negative_prompt,
|
| 1109 |
+
negative_prompt_2=negative_prompt_2,
|
| 1110 |
+
prompt_embeds=prompt_embeds,
|
| 1111 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1112 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1113 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1114 |
+
)
|
| 1115 |
+
|
| 1116 |
+
self.check_conditions(
|
| 1117 |
+
prompt,
|
| 1118 |
+
prompt_embeds,
|
| 1119 |
+
adapter_image,
|
| 1120 |
+
control_image,
|
| 1121 |
+
adapter_conditioning_scale,
|
| 1122 |
+
controlnet_conditioning_scale,
|
| 1123 |
+
control_guidance_start,
|
| 1124 |
+
control_guidance_end,
|
| 1125 |
+
)
|
| 1126 |
+
|
| 1127 |
+
# 2. Define call parameters
|
| 1128 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1129 |
+
batch_size = 1
|
| 1130 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1131 |
+
batch_size = len(prompt)
|
| 1132 |
+
else:
|
| 1133 |
+
batch_size = prompt_embeds.shape[0]
|
| 1134 |
+
|
| 1135 |
+
device = self._execution_device
|
| 1136 |
+
|
| 1137 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 1138 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 1139 |
+
# corresponds to doing no classifier free guidance.
|
| 1140 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 1141 |
+
|
| 1142 |
+
# 3. Encode input prompt
|
| 1143 |
+
(
|
| 1144 |
+
prompt_embeds,
|
| 1145 |
+
negative_prompt_embeds,
|
| 1146 |
+
pooled_prompt_embeds,
|
| 1147 |
+
negative_pooled_prompt_embeds,
|
| 1148 |
+
) = self.encode_prompt(
|
| 1149 |
+
prompt=prompt,
|
| 1150 |
+
prompt_2=prompt_2,
|
| 1151 |
+
device=device,
|
| 1152 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1153 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1154 |
+
negative_prompt=negative_prompt,
|
| 1155 |
+
negative_prompt_2=negative_prompt_2,
|
| 1156 |
+
prompt_embeds=prompt_embeds,
|
| 1157 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1158 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1159 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1160 |
+
clip_skip=clip_skip,
|
| 1161 |
+
)
|
| 1162 |
+
|
| 1163 |
+
# 4. Prepare timesteps
|
| 1164 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1165 |
+
|
| 1166 |
+
timesteps = self.scheduler.timesteps
|
| 1167 |
+
|
| 1168 |
+
# 5. Prepare latent variables
|
| 1169 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1170 |
+
latents = self.prepare_latents(
|
| 1171 |
+
batch_size * num_images_per_prompt,
|
| 1172 |
+
num_channels_latents,
|
| 1173 |
+
height,
|
| 1174 |
+
width,
|
| 1175 |
+
prompt_embeds.dtype,
|
| 1176 |
+
device,
|
| 1177 |
+
generator,
|
| 1178 |
+
latents,
|
| 1179 |
+
)
|
| 1180 |
+
|
| 1181 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1182 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1183 |
+
|
| 1184 |
+
# 7. Prepare added time ids & embeddings & adapter features
|
| 1185 |
+
if isinstance(adapter, MultiAdapter):
|
| 1186 |
+
adapter_state = adapter(adapter_input, adapter_conditioning_scale)
|
| 1187 |
+
for k, v in enumerate(adapter_state):
|
| 1188 |
+
adapter_state[k] = v
|
| 1189 |
+
else:
|
| 1190 |
+
adapter_state = adapter(adapter_input)
|
| 1191 |
+
for k, v in enumerate(adapter_state):
|
| 1192 |
+
adapter_state[k] = v * adapter_conditioning_scale
|
| 1193 |
+
if num_images_per_prompt > 1:
|
| 1194 |
+
for k, v in enumerate(adapter_state):
|
| 1195 |
+
adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
|
| 1196 |
+
if do_classifier_free_guidance:
|
| 1197 |
+
for k, v in enumerate(adapter_state):
|
| 1198 |
+
adapter_state[k] = torch.cat([v] * 2, dim=0)
|
| 1199 |
+
|
| 1200 |
+
# 7.2 Prepare control images
|
| 1201 |
+
if isinstance(controlnet, ControlNetModel):
|
| 1202 |
+
control_image = self.prepare_control_image(
|
| 1203 |
+
image=control_image,
|
| 1204 |
+
width=width,
|
| 1205 |
+
height=height,
|
| 1206 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1207 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1208 |
+
device=device,
|
| 1209 |
+
dtype=controlnet.dtype,
|
| 1210 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1211 |
+
guess_mode=guess_mode,
|
| 1212 |
+
)
|
| 1213 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 1214 |
+
control_images = []
|
| 1215 |
+
|
| 1216 |
+
for control_image_ in control_image:
|
| 1217 |
+
control_image_ = self.prepare_control_image(
|
| 1218 |
+
image=control_image_,
|
| 1219 |
+
width=width,
|
| 1220 |
+
height=height,
|
| 1221 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1222 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1223 |
+
device=device,
|
| 1224 |
+
dtype=controlnet.dtype,
|
| 1225 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1226 |
+
guess_mode=guess_mode,
|
| 1227 |
+
)
|
| 1228 |
+
|
| 1229 |
+
control_images.append(control_image_)
|
| 1230 |
+
|
| 1231 |
+
control_image = control_images
|
| 1232 |
+
else:
|
| 1233 |
+
raise ValueError(f"{controlnet.__class__} is not supported.")
|
| 1234 |
+
|
| 1235 |
+
# 8.2 Create tensor stating which controlnets to keep
|
| 1236 |
+
controlnet_keep = []
|
| 1237 |
+
for i in range(len(timesteps)):
|
| 1238 |
+
keeps = [
|
| 1239 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 1240 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 1241 |
+
]
|
| 1242 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 1243 |
+
controlnet_keep.append(keeps)
|
| 1244 |
+
else:
|
| 1245 |
+
controlnet_keep.append(keeps[0])
|
| 1246 |
+
|
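For each timestep index `i`, the `keeps` entry is 1.0 while the normalized progress lies inside `[start, end]` and 0.0 outside; these values later scale the ControlNet residuals on or off. A small numeric illustration, assuming 10 timesteps and a single ControlNet active for the first half of the schedule:

```py
# Illustrative values: 10 timesteps, ControlNet active for the first 50% of steps
timesteps = list(range(10))
control_guidance_start, control_guidance_end = [0.0], [0.5]

controlnet_keep = []
for i in range(len(timesteps)):
    keeps = [
        1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
        for s, e in zip(control_guidance_start, control_guidance_end)
    ]
    controlnet_keep.append(keeps[0])

print(controlnet_keep)  # [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
```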
| 1247 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1248 |
+
if self.text_encoder_2 is None:
|
| 1249 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1250 |
+
else:
|
| 1251 |
+
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
|
| 1252 |
+
|
| 1253 |
+
add_time_ids = self._get_add_time_ids(
|
| 1254 |
+
original_size,
|
| 1255 |
+
crops_coords_top_left,
|
| 1256 |
+
target_size,
|
| 1257 |
+
dtype=prompt_embeds.dtype,
|
| 1258 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1259 |
+
)
|
| 1260 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 1261 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 1262 |
+
negative_original_size,
|
| 1263 |
+
negative_crops_coords_top_left,
|
| 1264 |
+
negative_target_size,
|
| 1265 |
+
dtype=prompt_embeds.dtype,
|
| 1266 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1267 |
+
)
|
| 1268 |
+
else:
|
| 1269 |
+
negative_add_time_ids = add_time_ids
|
| 1270 |
+
|
| 1271 |
+
if do_classifier_free_guidance:
|
| 1272 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1273 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1274 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 1275 |
+
|
| 1276 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1277 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1278 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1279 |
+
|
| 1280 |
+
# 8. Denoising loop
|
| 1281 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1282 |
+
|
| 1283 |
+
# 7.1 Apply denoising_end
|
| 1284 |
+
if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
|
| 1285 |
+
discrete_timestep_cutoff = int(
|
| 1286 |
+
round(
|
| 1287 |
+
self.scheduler.config.num_train_timesteps
|
| 1288 |
+
- (denoising_end * self.scheduler.config.num_train_timesteps)
|
| 1289 |
+
)
|
| 1290 |
+
)
|
| 1291 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 1292 |
+
timesteps = timesteps[:num_inference_steps]
|
| 1293 |
+
|
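The `denoising_end` cutoff keeps only the timesteps whose discrete value is at or above `round(num_train_timesteps - denoising_end * num_train_timesteps)`. A worked example of that arithmetic, assuming the common scheduler default of 1000 training timesteps:

```py
num_train_timesteps = 1000      # assumed scheduler default, for illustration only
denoising_end = 0.8

discrete_timestep_cutoff = int(
    round(num_train_timesteps - denoising_end * num_train_timesteps)
)
print(discrete_timestep_cutoff)  # 200 -> only timesteps >= 200 are kept,
                                 # leaving the final 20% of denoising to a refiner
```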
| 1294 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1295 |
+
for i, t in enumerate(timesteps):
|
| 1296 |
+
# expand the latents if we are doing classifier free guidance
|
| 1297 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1298 |
+
|
| 1299 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1300 |
+
|
| 1301 |
+
# predict the noise residual
|
| 1302 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 1303 |
+
|
| 1304 |
+
if i < int(num_inference_steps * adapter_conditioning_factor):
|
| 1305 |
+
down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
|
| 1306 |
+
else:
|
| 1307 |
+
down_intrablock_additional_residuals = None
|
| 1308 |
+
|
| 1309 |
+
# ----------- ControlNet
|
| 1310 |
+
|
| 1311 |
+
# expand the latents if we are doing classifier free guidance
|
| 1312 |
+
latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1313 |
+
|
| 1314 |
+
# concat latents, mask, masked_image_latents in the channel dimension
|
| 1315 |
+
latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
|
| 1316 |
+
|
| 1317 |
+
# controlnet(s) inference
|
| 1318 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1319 |
+
# Infer ControlNet only for the conditional batch.
|
| 1320 |
+
control_model_input = latents
|
| 1321 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1322 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1323 |
+
controlnet_added_cond_kwargs = {
|
| 1324 |
+
"text_embeds": add_text_embeds.chunk(2)[1],
|
| 1325 |
+
"time_ids": add_time_ids.chunk(2)[1],
|
| 1326 |
+
}
|
| 1327 |
+
else:
|
| 1328 |
+
control_model_input = latent_model_input_controlnet
|
| 1329 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1330 |
+
controlnet_added_cond_kwargs = added_cond_kwargs
|
| 1331 |
+
|
| 1332 |
+
if isinstance(controlnet_keep[i], list):
|
| 1333 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1334 |
+
else:
|
| 1335 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1336 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1337 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1338 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1339 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1340 |
+
control_model_input,
|
| 1341 |
+
t,
|
| 1342 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1343 |
+
controlnet_cond=control_image,
|
| 1344 |
+
conditioning_scale=cond_scale,
|
| 1345 |
+
guess_mode=guess_mode,
|
| 1346 |
+
added_cond_kwargs=controlnet_added_cond_kwargs,
|
| 1347 |
+
return_dict=False,
|
| 1348 |
+
)
|
| 1349 |
+
|
| 1350 |
+
noise_pred = self.unet(
|
| 1351 |
+
latent_model_input,
|
| 1352 |
+
t,
|
| 1353 |
+
encoder_hidden_states=prompt_embeds,
|
| 1354 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1355 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1356 |
+
return_dict=False,
|
| 1357 |
+
down_intrablock_additional_residuals=down_intrablock_additional_residuals, # t2iadapter
|
| 1358 |
+
down_block_additional_residuals=down_block_res_samples, # controlnet
|
| 1359 |
+
mid_block_additional_residual=mid_block_res_sample, # controlnet
|
| 1360 |
+
)[0]
|
| 1361 |
+
|
| 1362 |
+
# perform guidance
|
| 1363 |
+
if do_classifier_free_guidance:
|
| 1364 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1365 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1366 |
+
|
| 1367 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1368 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1369 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
|
| 1370 |
+
|
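Classifier-free guidance above combines the two halves of the batched prediction as `uncond + guidance_scale * (text - uncond)`, so with `guidance_scale=5.0` the text-conditioned direction is amplified five-fold before the optional rescale. A toy numeric sketch with made-up tensors:

```py
import torch

guidance_scale = 5.0
noise_pred_uncond = torch.tensor([0.1, 0.2])
noise_pred_text = torch.tensor([0.3, 0.0])

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([ 1.1000, -0.8000])
```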
| 1371 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1372 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1373 |
+
|
| 1374 |
+
# call the callback, if provided
|
| 1375 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1376 |
+
progress_bar.update()
|
| 1377 |
+
if callback is not None and i % callback_steps == 0:
|
| 1378 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1379 |
+
callback(step_idx, t, latents)
|
| 1380 |
+
|
| 1381 |
+
if not output_type == "latent":
|
| 1382 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1383 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1384 |
+
|
| 1385 |
+
if needs_upcasting:
|
| 1386 |
+
self.upcast_vae()
|
| 1387 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1388 |
+
|
| 1389 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1390 |
+
|
| 1391 |
+
# cast back to fp16 if needed
|
| 1392 |
+
if needs_upcasting:
|
| 1393 |
+
self.vae.to(dtype=torch.float16)
|
| 1394 |
+
else:
|
| 1395 |
+
image = latents
|
| 1396 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
| 1397 |
+
|
| 1398 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1399 |
+
|
| 1400 |
+
# Offload all models
|
| 1401 |
+
self.maybe_free_model_hooks()
|
| 1402 |
+
|
| 1403 |
+
if not return_dict:
|
| 1404 |
+
return (image,)
|
| 1405 |
+
|
| 1406 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
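Taken together, the `__call__` above runs a single denoising loop in which the T2I-Adapter contributes intra-block residuals and the ControlNet contributes down/mid-block residuals. A hedged sketch of how such a combined pipeline might be invoked; the checkpoint ids, file paths and the `custom_pipeline` identifier are illustrative assumptions, not values taken from this file:

```py
import torch
from diffusers import ControlNetModel, DiffusionPipeline, T2IAdapter
from diffusers.utils import load_image

# Illustrative checkpoints; any SDXL-compatible adapter/controlnet pair would do.
adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
)
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
)

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="pipeline_stable_diffusion_xl_controlnet_adapter",  # assumed identifier
    adapter=adapter,
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

sketch_image = load_image("sketch.png")  # placeholder conditioning images
depth_image = load_image("depth.png")

image = pipe(
    prompt="a photo of a cat",
    adapter_image=sketch_image,
    control_image=depth_image,
    adapter_conditioning_scale=0.8,
    controlnet_conditioning_scale=0.8,
    num_inference_steps=30,
).images[0]
```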
v0.27.0/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
ADDED
|
@@ -0,0 +1,1850 @@
|
| 1 |
+
# Copyright 2024 Jake Babbidge, TencentARC and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# ignore the entire file for precommit
|
| 16 |
+
# type: ignore
|
| 17 |
+
|
| 18 |
+
import inspect
|
| 19 |
+
from collections.abc import Callable
|
| 20 |
+
from typing import Any, List, Optional, Union
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import PIL
|
| 24 |
+
import torch
|
| 25 |
+
import torch.nn.functional as F
|
| 26 |
+
from transformers import (
|
| 27 |
+
CLIPTextModel,
|
| 28 |
+
CLIPTextModelWithProjection,
|
| 29 |
+
CLIPTokenizer,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
from diffusers import DiffusionPipeline
|
| 33 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 34 |
+
from diffusers.loaders import (
|
| 35 |
+
FromSingleFileMixin,
|
| 36 |
+
LoraLoaderMixin,
|
| 37 |
+
StableDiffusionXLLoraLoaderMixin,
|
| 38 |
+
TextualInversionLoaderMixin,
|
| 39 |
+
)
|
| 40 |
+
from diffusers.models import (
|
| 41 |
+
AutoencoderKL,
|
| 42 |
+
ControlNetModel,
|
| 43 |
+
MultiAdapter,
|
| 44 |
+
T2IAdapter,
|
| 45 |
+
UNet2DConditionModel,
|
| 46 |
+
)
|
| 47 |
+
from diffusers.models.attention_processor import (
|
| 48 |
+
AttnProcessor2_0,
|
| 49 |
+
LoRAAttnProcessor2_0,
|
| 50 |
+
LoRAXFormersAttnProcessor,
|
| 51 |
+
XFormersAttnProcessor,
|
| 52 |
+
)
|
| 53 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 54 |
+
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
| 55 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 56 |
+
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
|
| 57 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 58 |
+
from diffusers.utils import (
|
| 59 |
+
PIL_INTERPOLATION,
|
| 60 |
+
USE_PEFT_BACKEND,
|
| 61 |
+
logging,
|
| 62 |
+
replace_example_docstring,
|
| 63 |
+
scale_lora_layers,
|
| 64 |
+
unscale_lora_layers,
|
| 65 |
+
)
|
| 66 |
+
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 70 |
+
|
| 71 |
+
EXAMPLE_DOC_STRING = """
|
| 72 |
+
Examples:
|
| 73 |
+
```py
|
| 74 |
+
>>> import torch
|
| 75 |
+
>>> from diffusers import ControlNetModel, DiffusionPipeline, T2IAdapter
|
| 76 |
+
>>> from diffusers.utils import load_image
|
| 77 |
+
>>> from PIL import Image
|
| 78 |
+
>>> from controlnet_aux.midas import MidasDetector
|
| 79 |
+
|
| 80 |
+
>>> adapter = T2IAdapter.from_pretrained(
|
| 81 |
+
... "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
|
| 82 |
+
... ).to("cuda")
|
| 83 |
+
|
| 84 |
+
>>> controlnet = ControlNetModel.from_pretrained(
|
| 85 |
+
... "diffusers/controlnet-depth-sdxl-1.0",
|
| 86 |
+
... torch_dtype=torch.float16,
|
| 87 |
+
... variant="fp16",
|
| 88 |
+
... use_safetensors=True
|
| 89 |
+
... ).to("cuda")
|
| 90 |
+
|
| 91 |
+
>>> pipe = DiffusionPipeline.from_pretrained(
|
| 92 |
+
... "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
|
| 93 |
+
... torch_dtype=torch.float16,
|
| 94 |
+
... variant="fp16",
|
| 95 |
+
... use_safetensors=True,
|
| 96 |
+
... custom_pipeline="stable_diffusion_xl_adapter_controlnet_inpaint",
|
| 97 |
+
... adapter=adapter,
|
| 98 |
+
... controlnet=controlnet,
|
| 99 |
+
... ).to("cuda")
|
| 100 |
+
|
| 101 |
+
>>> prompt = "a tiger sitting on a park bench"
|
| 102 |
+
>>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
| 103 |
+
>>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
| 104 |
+
|
| 105 |
+
>>> image = load_image(img_url).resize((1024, 1024))
|
| 106 |
+
>>> mask_image = load_image(mask_url).resize((1024, 1024))
|
| 107 |
+
|
| 108 |
+
>>> midas_depth = MidasDetector.from_pretrained(
|
| 109 |
+
... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
|
| 110 |
+
... ).to("cuda")
|
| 111 |
+
|
| 112 |
+
>>> depth_image = midas_depth(
|
| 113 |
+
... image, detect_resolution=512, image_resolution=1024
|
| 114 |
+
... )
|
| 115 |
+
|
| 116 |
+
>>> strength = 0.4
|
| 117 |
+
|
| 118 |
+
>>> generator = torch.manual_seed(42)
|
| 119 |
+
|
| 120 |
+
>>> result_image = pipe(
|
| 121 |
+
... image=image,
|
| 122 |
+
... mask_image=mask_image,
|
| 123 |
+
... adapter_image=depth_image,
|
| 124 |
+
... control_image=depth_image,
|
| 125 |
+
... controlnet_conditioning_scale=strength,
|
| 126 |
+
... adapter_conditioning_scale=strength,
|
| 127 |
+
... strength=0.7,
|
| 128 |
+
... generator=generator,
|
| 129 |
+
... prompt=prompt,
|
| 130 |
+
... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
|
| 131 |
+
... num_inference_steps=50
|
| 132 |
+
... ).images[0]
|
| 133 |
+
```
|
| 134 |
+
"""
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _preprocess_adapter_image(image, height, width):
|
| 138 |
+
if isinstance(image, torch.Tensor):
|
| 139 |
+
return image
|
| 140 |
+
elif isinstance(image, PIL.Image.Image):
|
| 141 |
+
image = [image]
|
| 142 |
+
|
| 143 |
+
if isinstance(image[0], PIL.Image.Image):
|
| 144 |
+
image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
|
| 145 |
+
image = [
|
| 146 |
+
i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
|
| 147 |
+
] # expand [h, w] or [h, w, c] to [b, h, w, c]
|
| 148 |
+
image = np.concatenate(image, axis=0)
|
| 149 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 150 |
+
image = image.transpose(0, 3, 1, 2)
|
| 151 |
+
image = torch.from_numpy(image)
|
| 152 |
+
elif isinstance(image[0], torch.Tensor):
|
| 153 |
+
if image[0].ndim == 3:
|
| 154 |
+
image = torch.stack(image, dim=0)
|
| 155 |
+
elif image[0].ndim == 4:
|
| 156 |
+
image = torch.cat(image, dim=0)
|
| 157 |
+
else:
|
| 158 |
+
raise ValueError(
|
| 159 |
+
f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
|
| 160 |
+
)
|
| 161 |
+
return image
|
| 162 |
+
|
| 163 |
+
|
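For a single PIL input, `_preprocess_adapter_image` resizes, adds a batch (and, for grayscale, a channel) dimension, scales to `[0, 1]` and moves channels first. A quick shape check under those assumptions, using a dummy image purely to trace shapes:

```py
import numpy as np
import torch
from PIL import Image

img = Image.new("RGB", (640, 480))                 # dummy image just to trace shapes
arr = np.array(img.resize((1024, 1024)))           # (1024, 1024, 3), uint8
arr = arr[None, ...].astype(np.float32) / 255.0    # (1, 1024, 1024, 3) in [0, 1]
tensor = torch.from_numpy(arr.transpose(0, 3, 1, 2))
print(tensor.shape)                                # torch.Size([1, 3, 1024, 1024])
```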
| 164 |
+
def mask_pil_to_torch(mask, height, width):
|
| 165 |
+
# preprocess mask
|
| 166 |
+
if isinstance(mask, Union[PIL.Image.Image, np.ndarray]):
|
| 167 |
+
mask = [mask]
|
| 168 |
+
|
| 169 |
+
if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
|
| 170 |
+
mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
|
| 171 |
+
mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
|
| 172 |
+
mask = mask.astype(np.float32) / 255.0
|
| 173 |
+
elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
|
| 174 |
+
mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
|
| 175 |
+
|
| 176 |
+
mask = torch.from_numpy(mask)
|
| 177 |
+
return mask
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
|
| 181 |
+
"""
|
| 182 |
+
Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
|
| 183 |
+
converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
|
| 184 |
+
``image`` and ``1`` for the ``mask``.
|
| 185 |
+
|
| 186 |
+
The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
|
| 187 |
+
binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
|
| 188 |
+
|
| 189 |
+
Args:
|
| 190 |
+
image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
|
| 191 |
+
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
|
| 192 |
+
``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
|
| 193 |
+
mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
|
| 194 |
+
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
|
| 195 |
+
``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
Raises:
|
| 199 |
+
ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
|
| 200 |
+
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
|
| 201 |
+
TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
|
| 202 |
+
(or the other way around).
|
| 203 |
+
|
| 204 |
+
Returns:
|
| 205 |
+
tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
|
| 206 |
+
dimensions: ``batch x channels x height x width``.
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
# TODO(Yiyi) - need to clean this up later
|
| 210 |
+
if image is None:
|
| 211 |
+
raise ValueError("`image` input cannot be undefined.")
|
| 212 |
+
|
| 213 |
+
if mask is None:
|
| 214 |
+
raise ValueError("`mask_image` input cannot be undefined.")
|
| 215 |
+
|
| 216 |
+
if isinstance(image, torch.Tensor):
|
| 217 |
+
if not isinstance(mask, torch.Tensor):
|
| 218 |
+
mask = mask_pil_to_torch(mask, height, width)
|
| 219 |
+
|
| 220 |
+
if image.ndim == 3:
|
| 221 |
+
image = image.unsqueeze(0)
|
| 222 |
+
|
| 223 |
+
# Batch and add channel dim for single mask
|
| 224 |
+
if mask.ndim == 2:
|
| 225 |
+
mask = mask.unsqueeze(0).unsqueeze(0)
|
| 226 |
+
|
| 227 |
+
# Batch single mask or add channel dim
|
| 228 |
+
if mask.ndim == 3:
|
| 229 |
+
# Single batched mask, no channel dim or single mask not batched but channel dim
|
| 230 |
+
if mask.shape[0] == 1:
|
| 231 |
+
mask = mask.unsqueeze(0)
|
| 232 |
+
|
| 233 |
+
# Batched masks no channel dim
|
| 234 |
+
else:
|
| 235 |
+
mask = mask.unsqueeze(1)
|
| 236 |
+
|
| 237 |
+
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
|
| 238 |
+
# assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
|
| 239 |
+
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
|
| 240 |
+
|
| 241 |
+
# Check image is in [-1, 1]
|
| 242 |
+
# if image.min() < -1 or image.max() > 1:
|
| 243 |
+
# raise ValueError("Image should be in [-1, 1] range")
|
| 244 |
+
|
| 245 |
+
# Check mask is in [0, 1]
|
| 246 |
+
if mask.min() < 0 or mask.max() > 1:
|
| 247 |
+
raise ValueError("Mask should be in [0, 1] range")
|
| 248 |
+
|
| 249 |
+
# Binarize mask
|
| 250 |
+
mask[mask < 0.5] = 0
|
| 251 |
+
mask[mask >= 0.5] = 1
|
| 252 |
+
|
| 253 |
+
# Image as float32
|
| 254 |
+
image = image.to(dtype=torch.float32)
|
| 255 |
+
elif isinstance(mask, torch.Tensor):
|
| 256 |
+
raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
|
| 257 |
+
else:
|
| 258 |
+
# preprocess image
|
| 259 |
+
if isinstance(image, Union[PIL.Image.Image, np.ndarray]):
|
| 260 |
+
image = [image]
|
| 261 |
+
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
|
| 262 |
+
# resize all images w.r.t. the passed height and width
|
| 263 |
+
image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
|
| 264 |
+
image = [np.array(i.convert("RGB"))[None, :] for i in image]
|
| 265 |
+
image = np.concatenate(image, axis=0)
|
| 266 |
+
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
|
| 267 |
+
image = np.concatenate([i[None, :] for i in image], axis=0)
|
| 268 |
+
|
| 269 |
+
image = image.transpose(0, 3, 1, 2)
|
| 270 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 271 |
+
|
| 272 |
+
mask = mask_pil_to_torch(mask, height, width)
|
| 273 |
+
mask[mask < 0.5] = 0
|
| 274 |
+
mask[mask >= 0.5] = 1
|
| 275 |
+
|
| 276 |
+
if image.shape[1] == 4:
|
| 277 |
+
# images are in latent space and thus can't
|
| 278 |
+
# be masked set masked_image to None
|
| 279 |
+
# we assume that the checkpoint is not an inpainting
|
| 280 |
+
# checkpoint. TODO(Yiyi) - need to clean this up later
|
| 281 |
+
masked_image = None
|
| 282 |
+
else:
|
| 283 |
+
masked_image = image * (mask < 0.5)
|
| 284 |
+
|
| 285 |
+
# n.b. ensure backwards compatibility as old function does not return image
|
| 286 |
+
if return_image:
|
| 287 |
+
return mask, masked_image, image
|
| 288 |
+
|
| 289 |
+
return mask, masked_image
|
| 290 |
+
|
| 291 |
+
|
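In the common PIL case, the helper returns a binarized `[0, 1]` mask of shape `(B, 1, H, W)` and an image in `[-1, 1]` of shape `(B, 3, H, W)` whose masked pixels are zeroed. A minimal sanity check with synthetic inputs, assumed to run in the same module where `prepare_mask_and_masked_image` is defined:

```py
import numpy as np
from PIL import Image

# Synthetic 64x64 gray image and a mask covering its right half
image = Image.fromarray(np.full((64, 64, 3), 200, dtype=np.uint8))
mask = Image.fromarray(
    np.concatenate([np.zeros((64, 32), np.uint8), np.full((64, 32), 255, np.uint8)], axis=1)
)

m, masked = prepare_mask_and_masked_image(image, mask, height=64, width=64)
print(m.shape, masked.shape)   # torch.Size([1, 1, 64, 64]) torch.Size([1, 3, 64, 64])
print(m.unique())              # tensor([0., 1.])
```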
| 292 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
|
| 293 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 294 |
+
"""
|
| 295 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 296 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 297 |
+
"""
|
| 298 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 299 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 300 |
+
# rescale the results from guidance (fixes overexposure)
|
| 301 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 302 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 303 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 304 |
+
return noise_cfg
|
| 305 |
+
|
| 306 |
+
|
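`rescale_noise_cfg` shrinks the CFG output so its per-sample standard deviation matches that of the text-conditioned prediction, then blends the rescaled and original predictions by `guidance_rescale`; with `guidance_rescale=1.0` the result takes on exactly the text branch's scale. A toy check, assumed to run next to the function defined above:

```py
import torch

noise_pred_text = torch.randn(1, 4, 8, 8)
noise_cfg = 5.0 * noise_pred_text   # CFG typically inflates the prediction magnitude

rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
print(noise_pred_text.std().item(), rescaled.std().item())  # matching std values
```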
| 307 |
+
class StableDiffusionXLControlNetAdapterInpaintPipeline(
|
| 308 |
+
DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, LoraLoaderMixin
|
| 309 |
+
):
|
| 310 |
+
r"""
|
| 311 |
+
Pipeline for text-guided image inpainting using Stable Diffusion XL augmented with T2I-Adapter and ControlNet
|
| 312 |
+
https://arxiv.org/abs/2302.08453
|
| 313 |
+
|
| 314 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 315 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 316 |
+
|
| 317 |
+
Args:
|
| 318 |
+
adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
|
| 319 |
+
Provides additional conditioning to the unet during the denoising process. If you set multiple Adapters as a
|
| 320 |
+
list, the outputs from each Adapter are added together to create one combined additional conditioning.
|
| 321 |
+
adapter_weights (`List[float]`, *optional*, defaults to None):
|
| 322 |
+
List of floats representing the weights by which each adapter's output will be multiplied before adding them
|
| 323 |
+
together.
|
| 324 |
+
vae ([`AutoencoderKL`]):
|
| 325 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 326 |
+
text_encoder ([`CLIPTextModel`]):
|
| 327 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 328 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 329 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 330 |
+
tokenizer (`CLIPTokenizer`):
|
| 331 |
+
Tokenizer of class
|
| 332 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 333 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 334 |
+
scheduler ([`SchedulerMixin`]):
|
| 335 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 336 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 337 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 338 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 339 |
+
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
| 340 |
+
feature_extractor ([`CLIPFeatureExtractor`]):
|
| 341 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 342 |
+
requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
|
| 343 |
+
Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the config
|
| 344 |
+
of `stabilityai/stable-diffusion-xl-refiner-1-0`.
|
| 345 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
| 346 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
| 347 |
+
`stabilityai/stable-diffusion-xl-base-1-0`.
|
| 348 |
+
"""
|
| 349 |
+
|
| 350 |
+
def __init__(
|
| 351 |
+
self,
|
| 352 |
+
vae: AutoencoderKL,
|
| 353 |
+
text_encoder: CLIPTextModel,
|
| 354 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
| 355 |
+
tokenizer: CLIPTokenizer,
|
| 356 |
+
tokenizer_2: CLIPTokenizer,
|
| 357 |
+
unet: UNet2DConditionModel,
|
| 358 |
+
adapter: Union[T2IAdapter, MultiAdapter],
|
| 359 |
+
controlnet: Union[ControlNetModel, MultiControlNetModel],
|
| 360 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 361 |
+
requires_aesthetics_score: bool = False,
|
| 362 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 363 |
+
):
|
| 364 |
+
super().__init__()
|
| 365 |
+
|
| 366 |
+
if isinstance(controlnet, (list, tuple)):
|
| 367 |
+
controlnet = MultiControlNetModel(controlnet)
|
| 368 |
+
|
| 369 |
+
self.register_modules(
|
| 370 |
+
vae=vae,
|
| 371 |
+
text_encoder=text_encoder,
|
| 372 |
+
text_encoder_2=text_encoder_2,
|
| 373 |
+
tokenizer=tokenizer,
|
| 374 |
+
tokenizer_2=tokenizer_2,
|
| 375 |
+
unet=unet,
|
| 376 |
+
adapter=adapter,
|
| 377 |
+
controlnet=controlnet,
|
| 378 |
+
scheduler=scheduler,
|
| 379 |
+
)
|
| 380 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 381 |
+
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 382 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
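# Note: with the standard SDXL VAE (4 entries in `block_out_channels`) this evaluates to 8, i.e. a 1024x1024 image maps to 128x128 latents.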
|
| 383 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 384 |
+
self.control_image_processor = VaeImageProcessor(
|
| 385 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 386 |
+
)
|
| 387 |
+
self.default_sample_size = self.unet.config.sample_size
|
| 388 |
+
|
| 389 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
|
| 390 |
+
def encode_prompt(
|
| 391 |
+
self,
|
| 392 |
+
prompt: str,
|
| 393 |
+
prompt_2: Optional[str] = None,
|
| 394 |
+
device: Optional[torch.device] = None,
|
| 395 |
+
num_images_per_prompt: int = 1,
|
| 396 |
+
do_classifier_free_guidance: bool = True,
|
| 397 |
+
negative_prompt: Optional[str] = None,
|
| 398 |
+
negative_prompt_2: Optional[str] = None,
|
| 399 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 400 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 401 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 402 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 403 |
+
lora_scale: Optional[float] = None,
|
| 404 |
+
clip_skip: Optional[int] = None,
|
| 405 |
+
):
|
| 406 |
+
r"""
|
| 407 |
+
Encodes the prompt into text encoder hidden states.
|
| 408 |
+
|
| 409 |
+
Args:
|
| 410 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 411 |
+
prompt to be encoded
|
| 412 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 413 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 414 |
+
used in both text-encoders
|
| 415 |
+
device: (`torch.device`):
|
| 416 |
+
torch device
|
| 417 |
+
num_images_per_prompt (`int`):
|
| 418 |
+
number of images that should be generated per prompt
|
| 419 |
+
do_classifier_free_guidance (`bool`):
|
| 420 |
+
whether to use classifier free guidance or not
|
| 421 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 422 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 423 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 424 |
+
less than `1`).
|
| 425 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 426 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 427 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 428 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 429 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 430 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 431 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 432 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 433 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 434 |
+
argument.
|
| 435 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 436 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 437 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 438 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 439 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 440 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 441 |
+
input argument.
|
| 442 |
+
lora_scale (`float`, *optional*):
|
| 443 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 444 |
+
clip_skip (`int`, *optional*):
|
| 445 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 446 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 447 |
+
"""
|
| 448 |
+
device = device or self._execution_device
|
| 449 |
+
|
| 450 |
+
# set lora scale so that monkey patched LoRA
|
| 451 |
+
# function of text encoder can correctly access it
|
| 452 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
| 453 |
+
self._lora_scale = lora_scale
|
| 454 |
+
|
| 455 |
+
# dynamically adjust the LoRA scale
|
| 456 |
+
if self.text_encoder is not None:
|
| 457 |
+
if not USE_PEFT_BACKEND:
|
| 458 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 459 |
+
else:
|
| 460 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 461 |
+
|
| 462 |
+
if self.text_encoder_2 is not None:
|
| 463 |
+
if not USE_PEFT_BACKEND:
|
| 464 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 465 |
+
else:
|
| 466 |
+
scale_lora_layers(self.text_encoder_2, lora_scale)
|
| 467 |
+
|
| 468 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 469 |
+
|
| 470 |
+
if prompt is not None:
|
| 471 |
+
batch_size = len(prompt)
|
| 472 |
+
else:
|
| 473 |
+
batch_size = prompt_embeds.shape[0]
|
| 474 |
+
|
| 475 |
+
# Define tokenizers and text encoders
|
| 476 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 477 |
+
text_encoders = (
|
| 478 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 479 |
+
)
|
| 480 |
+
|
| 481 |
+
if prompt_embeds is None:
|
| 482 |
+
prompt_2 = prompt_2 or prompt
|
| 483 |
+
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
|
| 484 |
+
|
| 485 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 486 |
+
prompt_embeds_list = []
|
| 487 |
+
prompts = [prompt, prompt_2]
|
| 488 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 489 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 490 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 491 |
+
|
| 492 |
+
text_inputs = tokenizer(
|
| 493 |
+
prompt,
|
| 494 |
+
padding="max_length",
|
| 495 |
+
max_length=tokenizer.model_max_length,
|
| 496 |
+
truncation=True,
|
| 497 |
+
return_tensors="pt",
|
| 498 |
+
)
|
| 499 |
+
|
| 500 |
+
text_input_ids = text_inputs.input_ids
|
| 501 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 502 |
+
|
| 503 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 504 |
+
text_input_ids, untruncated_ids
|
| 505 |
+
):
|
| 506 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 507 |
+
logger.warning(
|
| 508 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 509 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 510 |
+
)
|
| 511 |
+
|
| 512 |
+
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
|
| 513 |
+
|
| 514 |
+
# We are only interested in the pooled output of the final text encoder
|
| 515 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 516 |
+
if clip_skip is None:
|
| 517 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 518 |
+
else:
|
| 519 |
+
# "2" because SDXL always indexes from the penultimate layer.
|
| 520 |
+
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
|
| 521 |
+
|
| 522 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 523 |
+
|
| 524 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
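# Note: concatenating along the last dimension stacks the hidden states of both text encoders (768 + 1280 = 2048 channels with the standard SDXL encoders) into one conditioning tensor.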
|
| 525 |
+
|
| 526 |
+
# get unconditional embeddings for classifier free guidance
|
| 527 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 528 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 529 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 530 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 531 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 532 |
+
negative_prompt = negative_prompt or ""
|
| 533 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 534 |
+
|
| 535 |
+
# normalize str to list
|
| 536 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 537 |
+
negative_prompt_2 = (
|
| 538 |
+
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
|
| 539 |
+
)
|
| 540 |
+
|
| 541 |
+
uncond_tokens: List[str]
|
| 542 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 543 |
+
raise TypeError(
|
| 544 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 545 |
+
f" {type(prompt)}."
|
| 546 |
+
)
|
| 547 |
+
elif batch_size != len(negative_prompt):
|
| 548 |
+
raise ValueError(
|
| 549 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 550 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 551 |
+
" the batch size of `prompt`."
|
| 552 |
+
)
|
| 553 |
+
else:
|
| 554 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 555 |
+
|
| 556 |
+
negative_prompt_embeds_list = []
|
| 557 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 558 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 559 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 560 |
+
|
| 561 |
+
max_length = prompt_embeds.shape[1]
|
| 562 |
+
uncond_input = tokenizer(
|
| 563 |
+
negative_prompt,
|
| 564 |
+
padding="max_length",
|
| 565 |
+
max_length=max_length,
|
| 566 |
+
truncation=True,
|
| 567 |
+
return_tensors="pt",
|
| 568 |
+
)
|
| 569 |
+
|
| 570 |
+
negative_prompt_embeds = text_encoder(
|
| 571 |
+
uncond_input.input_ids.to(device),
|
| 572 |
+
output_hidden_states=True,
|
| 573 |
+
)
|
| 574 |
+
# We are only interested in the pooled output of the final text encoder
|
| 575 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 576 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 577 |
+
|
| 578 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 579 |
+
|
| 580 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 581 |
+
|
| 582 |
+
if self.text_encoder_2 is not None:
|
| 583 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 584 |
+
else:
|
| 585 |
+
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 586 |
+
|
| 587 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 588 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 589 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 590 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 591 |
+
|
| 592 |
+
if do_classifier_free_guidance:
|
| 593 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 594 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 595 |
+
|
| 596 |
+
if self.text_encoder_2 is not None:
|
| 597 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 598 |
+
else:
|
| 599 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 600 |
+
|
| 601 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 602 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 603 |
+
|
| 604 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 605 |
+
bs_embed * num_images_per_prompt, -1
|
| 606 |
+
)
|
| 607 |
+
if do_classifier_free_guidance:
|
| 608 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 609 |
+
bs_embed * num_images_per_prompt, -1
|
| 610 |
+
)
|
| 611 |
+
|
| 612 |
+
if self.text_encoder is not None:
|
| 613 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 614 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 615 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 616 |
+
|
| 617 |
+
if self.text_encoder_2 is not None:
|
| 618 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 619 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 620 |
+
unscale_lora_layers(self.text_encoder_2, lora_scale)
|
| 621 |
+
|
| 622 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 623 |
+
|
| 624 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 625 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 626 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 627 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 628 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 629 |
+
# and should be between [0, 1]
|
| 630 |
+
|
| 631 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 632 |
+
extra_step_kwargs = {}
|
| 633 |
+
if accepts_eta:
|
| 634 |
+
extra_step_kwargs["eta"] = eta
|
| 635 |
+
|
| 636 |
+
# check if the scheduler accepts generator
|
| 637 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 638 |
+
if accepts_generator:
|
| 639 |
+
extra_step_kwargs["generator"] = generator
|
| 640 |
+
return extra_step_kwargs
|
| 641 |
+
|
| 642 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
|
| 643 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 644 |
+
image_is_pil = isinstance(image, PIL.Image.Image)
|
| 645 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 646 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 647 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
| 648 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 649 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 650 |
+
|
| 651 |
+
if (
|
| 652 |
+
not image_is_pil
|
| 653 |
+
and not image_is_tensor
|
| 654 |
+
and not image_is_np
|
| 655 |
+
and not image_is_pil_list
|
| 656 |
+
and not image_is_tensor_list
|
| 657 |
+
and not image_is_np_list
|
| 658 |
+
):
|
| 659 |
+
raise TypeError(
|
| 660 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 661 |
+
)
|
| 662 |
+
|
| 663 |
+
if image_is_pil:
|
| 664 |
+
image_batch_size = 1
|
| 665 |
+
else:
|
| 666 |
+
image_batch_size = len(image)
|
| 667 |
+
|
| 668 |
+
if prompt is not None and isinstance(prompt, str):
|
| 669 |
+
prompt_batch_size = 1
|
| 670 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 671 |
+
prompt_batch_size = len(prompt)
|
| 672 |
+
elif prompt_embeds is not None:
|
| 673 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 674 |
+
|
| 675 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 676 |
+
raise ValueError(
|
| 677 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 678 |
+
)
|
| 679 |
+
|
| 680 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
|
| 681 |
+
def check_inputs(
|
| 682 |
+
self,
|
| 683 |
+
prompt,
|
| 684 |
+
prompt_2,
|
| 685 |
+
height,
|
| 686 |
+
width,
|
| 687 |
+
callback_steps,
|
| 688 |
+
negative_prompt=None,
|
| 689 |
+
negative_prompt_2=None,
|
| 690 |
+
prompt_embeds=None,
|
| 691 |
+
negative_prompt_embeds=None,
|
| 692 |
+
pooled_prompt_embeds=None,
|
| 693 |
+
negative_pooled_prompt_embeds=None,
|
| 694 |
+
callback_on_step_end_tensor_inputs=None,
|
| 695 |
+
):
|
| 696 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 697 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 698 |
+
|
| 699 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 700 |
+
raise ValueError(
|
| 701 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 702 |
+
f" {type(callback_steps)}."
|
| 703 |
+
)
|
| 704 |
+
|
| 705 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 706 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 707 |
+
):
|
| 708 |
+
raise ValueError(
|
| 709 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 710 |
+
)
|
| 711 |
+
|
| 712 |
+
if prompt is not None and prompt_embeds is not None:
|
| 713 |
+
raise ValueError(
|
| 714 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 715 |
+
" only forward one of the two."
|
| 716 |
+
)
|
| 717 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
| 718 |
+
raise ValueError(
|
| 719 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 720 |
+
" only forward one of the two."
|
| 721 |
+
)
|
| 722 |
+
elif prompt is None and prompt_embeds is None:
|
| 723 |
+
raise ValueError(
|
| 724 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 725 |
+
)
|
| 726 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 727 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 728 |
+
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
| 729 |
+
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
| 730 |
+
|
| 731 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 732 |
+
raise ValueError(
|
| 733 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 734 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 735 |
+
)
|
| 736 |
+
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
| 737 |
+
raise ValueError(
|
| 738 |
+
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
| 739 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 740 |
+
)
|
| 741 |
+
|
| 742 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 743 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 744 |
+
raise ValueError(
|
| 745 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 746 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 747 |
+
f" {negative_prompt_embeds.shape}."
|
| 748 |
+
)
|
| 749 |
+
|
| 750 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 751 |
+
raise ValueError(
|
| 752 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 753 |
+
)
|
| 754 |
+
|
| 755 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 756 |
+
raise ValueError(
|
| 757 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 758 |
+
)
|
| 759 |
+
|
| 760 |
+
def check_conditions(
|
| 761 |
+
self,
|
| 762 |
+
prompt,
|
| 763 |
+
prompt_embeds,
|
| 764 |
+
adapter_image,
|
| 765 |
+
control_image,
|
| 766 |
+
adapter_conditioning_scale,
|
| 767 |
+
controlnet_conditioning_scale,
|
| 768 |
+
control_guidance_start,
|
| 769 |
+
control_guidance_end,
|
| 770 |
+
):
|
| 771 |
+
# controlnet checks
|
| 772 |
+
if not isinstance(control_guidance_start, (tuple, list)):
|
| 773 |
+
control_guidance_start = [control_guidance_start]
|
| 774 |
+
|
| 775 |
+
if not isinstance(control_guidance_end, (tuple, list)):
|
| 776 |
+
control_guidance_end = [control_guidance_end]
|
| 777 |
+
|
| 778 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 779 |
+
raise ValueError(
|
| 780 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 781 |
+
)
|
| 782 |
+
|
| 783 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 784 |
+
if len(control_guidance_start) != len(self.controlnet.nets):
|
| 785 |
+
raise ValueError(
|
| 786 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
| 787 |
+
)
|
| 788 |
+
|
| 789 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 790 |
+
if start >= end:
|
| 791 |
+
raise ValueError(
|
| 792 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 793 |
+
)
|
| 794 |
+
if start < 0.0:
|
| 795 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 796 |
+
if end > 1.0:
|
| 797 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 798 |
+
|
| 799 |
+
# Check controlnet `image`
|
| 800 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 801 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 802 |
+
)
|
| 803 |
+
if (
|
| 804 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 805 |
+
or is_compiled
|
| 806 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 807 |
+
):
|
| 808 |
+
self.check_image(control_image, prompt, prompt_embeds)
|
| 809 |
+
elif (
|
| 810 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 811 |
+
or is_compiled
|
| 812 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 813 |
+
):
|
| 814 |
+
if not isinstance(control_image, list):
|
| 815 |
+
raise TypeError("For multiple controlnets: `control_image` must be type `list`")
|
| 816 |
+
|
| 817 |
+
# When `image` is a nested list:
|
| 818 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 819 |
+
elif any(isinstance(i, list) for i in control_image):
|
| 820 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 821 |
+
elif len(control_image) != len(self.controlnet.nets):
|
| 822 |
+
raise ValueError(
|
| 823 |
+
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
|
| 824 |
+
)
|
| 825 |
+
|
| 826 |
+
for image_ in control_image:
|
| 827 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 828 |
+
else:
|
| 829 |
+
assert False
|
| 830 |
+
|
| 831 |
+
# Check `controlnet_conditioning_scale`
|
| 832 |
+
if (
|
| 833 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 834 |
+
or is_compiled
|
| 835 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 836 |
+
):
|
| 837 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 838 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 839 |
+
elif (
|
| 840 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 841 |
+
or is_compiled
|
| 842 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 843 |
+
):
|
| 844 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 845 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 846 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 847 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 848 |
+
self.controlnet.nets
|
| 849 |
+
):
|
| 850 |
+
raise ValueError(
|
| 851 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 852 |
+
" the same length as the number of controlnets"
|
| 853 |
+
)
|
| 854 |
+
else:
|
| 855 |
+
assert False
|
| 856 |
+
|
| 857 |
+
# adapter checks
|
| 858 |
+
if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
|
| 859 |
+
self.check_image(adapter_image, prompt, prompt_embeds)
|
| 860 |
+
elif (
|
| 861 |
+
isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
|
| 862 |
+
):
|
| 863 |
+
if not isinstance(adapter_image, list):
|
| 864 |
+
raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
|
| 865 |
+
|
| 866 |
+
# When `image` is a nested list:
|
| 867 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 868 |
+
elif any(isinstance(i, list) for i in adapter_image):
|
| 869 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 870 |
+
elif len(adapter_image) != len(self.adapter.adapters):
|
| 871 |
+
raise ValueError(
|
| 872 |
+
f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapters.nets)} Adapters."
|
| 873 |
+
)
|
| 874 |
+
|
| 875 |
+
for image_ in adapter_image:
|
| 876 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 877 |
+
else:
|
| 878 |
+
assert False
|
| 879 |
+
|
| 880 |
+
# Check `adapter_conditioning_scale`
|
| 881 |
+
if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
|
| 882 |
+
if not isinstance(adapter_conditioning_scale, float):
|
| 883 |
+
raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
|
| 884 |
+
elif (
|
| 885 |
+
isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
|
| 886 |
+
):
|
| 887 |
+
if isinstance(adapter_conditioning_scale, list):
|
| 888 |
+
if any(isinstance(i, list) for i in adapter_conditioning_scale):
|
| 889 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 890 |
+
elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
|
| 891 |
+
self.adapter.adapters
|
| 892 |
+
):
|
| 893 |
+
raise ValueError(
|
| 894 |
+
"For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
|
| 895 |
+
" the same length as the number of adapters"
|
| 896 |
+
)
|
| 897 |
+
else:
|
| 898 |
+
assert False
|
| 899 |
+
|
| 900 |
+
def prepare_latents(
|
| 901 |
+
self,
|
| 902 |
+
batch_size,
|
| 903 |
+
num_channels_latents,
|
| 904 |
+
height,
|
| 905 |
+
width,
|
| 906 |
+
dtype,
|
| 907 |
+
device,
|
| 908 |
+
generator,
|
| 909 |
+
latents=None,
|
| 910 |
+
image=None,
|
| 911 |
+
timestep=None,
|
| 912 |
+
is_strength_max=True,
|
| 913 |
+
add_noise=True,
|
| 914 |
+
return_noise=False,
|
| 915 |
+
return_image_latents=False,
|
| 916 |
+
):
|
| 917 |
+
shape = (
|
| 918 |
+
batch_size,
|
| 919 |
+
num_channels_latents,
|
| 920 |
+
height // self.vae_scale_factor,
|
| 921 |
+
width // self.vae_scale_factor,
|
| 922 |
+
)
|
| 923 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 924 |
+
raise ValueError(
|
| 925 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 926 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 927 |
+
)
|
| 928 |
+
|
| 929 |
+
if (image is None or timestep is None) and not is_strength_max:
|
| 930 |
+
raise ValueError(
|
| 931 |
+
"Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
|
| 932 |
+
"However, either the image or the noise timestep has not been provided."
|
| 933 |
+
)
|
| 934 |
+
|
| 935 |
+
if image.shape[1] == 4:
|
| 936 |
+
image_latents = image.to(device=device, dtype=dtype)
|
| 937 |
+
elif return_image_latents or (latents is None and not is_strength_max):
|
| 938 |
+
image = image.to(device=device, dtype=dtype)
|
| 939 |
+
image_latents = self._encode_vae_image(image=image, generator=generator)
|
| 940 |
+
|
| 941 |
+
image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
|
| 942 |
+
|
| 943 |
+
if latents is None and add_noise:
|
| 944 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 945 |
+
# if strength is 1. then initialise the latents to noise, else initialise to image + noise
|
| 946 |
+
latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
|
| 947 |
+
# if pure noise then scale the initial latents by the Scheduler's init sigma
|
| 948 |
+
latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
|
| 949 |
+
elif add_noise:
|
| 950 |
+
noise = latents.to(device)
|
| 951 |
+
latents = noise * self.scheduler.init_noise_sigma
|
| 952 |
+
else:
|
| 953 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 954 |
+
latents = image_latents.to(device)
|
| 955 |
+
|
| 956 |
+
outputs = (latents,)
|
| 957 |
+
|
| 958 |
+
if return_noise:
|
| 959 |
+
outputs += (noise,)
|
| 960 |
+
|
| 961 |
+
if return_image_latents:
|
| 962 |
+
outputs += (image_latents,)
|
| 963 |
+
|
| 964 |
+
return outputs
|
| 965 |
+
|
| 966 |
+
def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
|
| 967 |
+
dtype = image.dtype
|
| 968 |
+
if self.vae.config.force_upcast:
|
| 969 |
+
image = image.float()
|
| 970 |
+
self.vae.to(dtype=torch.float32)
|
| 971 |
+
|
| 972 |
+
if isinstance(generator, list):
|
| 973 |
+
image_latents = [
|
| 974 |
+
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
|
| 975 |
+
for i in range(image.shape[0])
|
| 976 |
+
]
|
| 977 |
+
image_latents = torch.cat(image_latents, dim=0)
|
| 978 |
+
else:
|
| 979 |
+
image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
|
| 980 |
+
|
| 981 |
+
if self.vae.config.force_upcast:
|
| 982 |
+
self.vae.to(dtype)
|
| 983 |
+
|
| 984 |
+
image_latents = image_latents.to(dtype)
|
| 985 |
+
image_latents = self.vae.config.scaling_factor * image_latents
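# Note: `vae.config.scaling_factor` (0.13025 for the standard SDXL VAE) brings the latents onto the scale the UNet was trained on.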
|
| 986 |
+
|
| 987 |
+
return image_latents
|
| 988 |
+
|
| 989 |
+
def prepare_mask_latents(
|
| 990 |
+
self,
|
| 991 |
+
mask,
|
| 992 |
+
masked_image,
|
| 993 |
+
batch_size,
|
| 994 |
+
height,
|
| 995 |
+
width,
|
| 996 |
+
dtype,
|
| 997 |
+
device,
|
| 998 |
+
generator,
|
| 999 |
+
do_classifier_free_guidance,
|
| 1000 |
+
):
|
| 1001 |
+
# resize the mask to latents shape as we concatenate the mask to the latents
|
| 1002 |
+
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
|
| 1003 |
+
# and half precision
|
| 1004 |
+
mask = torch.nn.functional.interpolate(
|
| 1005 |
+
mask,
|
| 1006 |
+
size=(
|
| 1007 |
+
height // self.vae_scale_factor,
|
| 1008 |
+
width // self.vae_scale_factor,
|
| 1009 |
+
),
|
| 1010 |
+
)
|
| 1011 |
+
mask = mask.to(device=device, dtype=dtype)
|
| 1012 |
+
|
| 1013 |
+
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
|
| 1014 |
+
if mask.shape[0] < batch_size:
|
| 1015 |
+
if not batch_size % mask.shape[0] == 0:
|
| 1016 |
+
raise ValueError(
|
| 1017 |
+
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
|
| 1018 |
+
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
|
| 1019 |
+
" of masks that you pass is divisible by the total requested batch size."
|
| 1020 |
+
)
|
| 1021 |
+
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
|
| 1022 |
+
|
| 1023 |
+
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
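# Note: under classifier-free guidance the latent model input holds the unconditional and conditional batches, so the mask is duplicated to stay aligned.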
|
| 1024 |
+
|
| 1025 |
+
masked_image_latents = None
|
| 1026 |
+
if masked_image is not None:
|
| 1027 |
+
masked_image = masked_image.to(device=device, dtype=dtype)
|
| 1028 |
+
masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
|
| 1029 |
+
if masked_image_latents.shape[0] < batch_size:
|
| 1030 |
+
if not batch_size % masked_image_latents.shape[0] == 0:
|
| 1031 |
+
raise ValueError(
|
| 1032 |
+
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
|
| 1033 |
+
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
|
| 1034 |
+
" Make sure the number of images that you pass is divisible by the total requested batch size."
|
| 1035 |
+
)
|
| 1036 |
+
masked_image_latents = masked_image_latents.repeat(
|
| 1037 |
+
batch_size // masked_image_latents.shape[0], 1, 1, 1
|
| 1038 |
+
)
|
| 1039 |
+
|
| 1040 |
+
masked_image_latents = (
|
| 1041 |
+
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
| 1042 |
+
)
|
| 1043 |
+
|
| 1044 |
+
# aligning device to prevent device errors when concating it with the latent model input
|
| 1045 |
+
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
|
| 1046 |
+
|
| 1047 |
+
return mask, masked_image_latents
|
| 1048 |
+
|
| 1049 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
|
| 1050 |
+
def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
|
| 1051 |
+
# get the original timestep using init_timestep
|
| 1052 |
+
if denoising_start is None:
|
| 1053 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 1054 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 1055 |
+
else:
|
| 1056 |
+
t_start = 0
|
| 1057 |
+
|
| 1058 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 1059 |
+
|
| 1060 |
+
# Strength is irrelevant if we directly request a timestep to start at;
|
| 1061 |
+
# that is, strength is determined by the denoising_start instead.
|
| 1062 |
+
if denoising_start is not None:
|
| 1063 |
+
discrete_timestep_cutoff = int(
|
| 1064 |
+
round(
|
| 1065 |
+
self.scheduler.config.num_train_timesteps
|
| 1066 |
+
- (denoising_start * self.scheduler.config.num_train_timesteps)
|
| 1067 |
+
)
|
| 1068 |
+
)
|
| 1069 |
+
|
| 1070 |
+
num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
|
| 1071 |
+
if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
|
| 1072 |
+
# if the scheduler is a 2nd order scheduler we might have to do +1
|
| 1073 |
+
# because `num_inference_steps` might be even given that every timestep
|
| 1074 |
+
# (except the highest one) is duplicated. If `num_inference_steps` is even it would
|
| 1075 |
+
# mean that we cut the timesteps in the middle of the denoising step
|
| 1076 |
+
# (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
|
| 1077 |
+
# we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
|
| 1078 |
+
num_inference_steps = num_inference_steps + 1
|
| 1079 |
+
|
| 1080 |
+
# because t_n+1 >= t_n, we slice the timesteps starting from the end
|
| 1081 |
+
timesteps = timesteps[-num_inference_steps:]
|
| 1082 |
+
return timesteps, num_inference_steps
|
| 1083 |
+
|
| 1084 |
+
return timesteps, num_inference_steps - t_start
|
| 1085 |
+
|
| 1086 |
+
def _get_add_time_ids(
|
| 1087 |
+
self,
|
| 1088 |
+
original_size,
|
| 1089 |
+
crops_coords_top_left,
|
| 1090 |
+
target_size,
|
| 1091 |
+
aesthetic_score,
|
| 1092 |
+
negative_aesthetic_score,
|
| 1093 |
+
dtype,
|
| 1094 |
+
text_encoder_projection_dim=None,
|
| 1095 |
+
):
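# SDXL is conditioned on additional "time ids": original size, crop coordinates, and either the target size or, for refiner-style checkpoints (`requires_aesthetics_score=True`), the aesthetic scores.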
|
| 1096 |
+
if self.config.requires_aesthetics_score:
|
| 1097 |
+
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
|
| 1098 |
+
add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
|
| 1099 |
+
else:
|
| 1100 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 1101 |
+
add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 1102 |
+
|
| 1103 |
+
passed_add_embed_dim = (
|
| 1104 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
|
| 1105 |
+
)
|
| 1106 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 1107 |
+
|
| 1108 |
+
if (
|
| 1109 |
+
expected_add_embed_dim > passed_add_embed_dim
|
| 1110 |
+
and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
|
| 1111 |
+
):
|
| 1112 |
+
raise ValueError(
|
| 1113 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
|
| 1114 |
+
)
|
| 1115 |
+
elif (
|
| 1116 |
+
expected_add_embed_dim < passed_add_embed_dim
|
| 1117 |
+
and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
|
| 1118 |
+
):
|
| 1119 |
+
raise ValueError(
|
| 1120 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
|
| 1121 |
+
)
|
| 1122 |
+
elif expected_add_embed_dim != passed_add_embed_dim:
|
| 1123 |
+
raise ValueError(
|
| 1124 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 1125 |
+
)
|
| 1126 |
+
|
| 1127 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 1128 |
+
add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
|
| 1129 |
+
|
| 1130 |
+
return add_time_ids, add_neg_time_ids
|
| 1131 |
+
|
| 1132 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
| 1133 |
+
def upcast_vae(self):
|
| 1134 |
+
dtype = self.vae.dtype
|
| 1135 |
+
self.vae.to(dtype=torch.float32)
|
| 1136 |
+
use_torch_2_0_or_xformers = isinstance(
|
| 1137 |
+
self.vae.decoder.mid_block.attentions[0].processor,
|
| 1138 |
+
(
|
| 1139 |
+
AttnProcessor2_0,
|
| 1140 |
+
XFormersAttnProcessor,
|
| 1141 |
+
LoRAXFormersAttnProcessor,
|
| 1142 |
+
LoRAAttnProcessor2_0,
|
| 1143 |
+
),
|
| 1144 |
+
)
|
| 1145 |
+
# if xformers or torch_2_0 is used attention block does not need
|
| 1146 |
+
# to be in float32 which can save lots of memory
|
| 1147 |
+
if use_torch_2_0_or_xformers:
|
| 1148 |
+
self.vae.post_quant_conv.to(dtype)
|
| 1149 |
+
self.vae.decoder.conv_in.to(dtype)
|
| 1150 |
+
self.vae.decoder.mid_block.to(dtype)
|
| 1151 |
+
|
| 1152 |
+
# Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
|
| 1153 |
+
def _default_height_width(self, height, width, image):
|
| 1154 |
+
# NOTE: It is possible that a list of images have different
|
| 1155 |
+
# dimensions for each image, so just checking the first image
|
| 1156 |
+
# is not _exactly_ correct, but it is simple.
|
| 1157 |
+
while isinstance(image, list):
|
| 1158 |
+
image = image[0]
|
| 1159 |
+
|
| 1160 |
+
if height is None:
|
| 1161 |
+
if isinstance(image, PIL.Image.Image):
|
| 1162 |
+
height = image.height
|
| 1163 |
+
elif isinstance(image, torch.Tensor):
|
| 1164 |
+
height = image.shape[-2]
|
| 1165 |
+
|
| 1166 |
+
# round down to nearest multiple of `self.adapter.downscale_factor`
|
| 1167 |
+
height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
|
| 1168 |
+
|
| 1169 |
+
if width is None:
|
| 1170 |
+
if isinstance(image, PIL.Image.Image):
|
| 1171 |
+
width = image.width
|
| 1172 |
+
elif isinstance(image, torch.Tensor):
|
| 1173 |
+
width = image.shape[-1]
|
| 1174 |
+
|
| 1175 |
+
# round down to nearest multiple of `self.adapter.downscale_factor`
|
| 1176 |
+
width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
|
| 1177 |
+
|
| 1178 |
+
return height, width
|
| 1179 |
+
|
| 1180 |
+
def prepare_control_image(
|
| 1181 |
+
self,
|
| 1182 |
+
image,
|
| 1183 |
+
width,
|
| 1184 |
+
height,
|
| 1185 |
+
batch_size,
|
| 1186 |
+
num_images_per_prompt,
|
| 1187 |
+
device,
|
| 1188 |
+
dtype,
|
| 1189 |
+
do_classifier_free_guidance=False,
|
| 1190 |
+
guess_mode=False,
|
| 1191 |
+
):
|
| 1192 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 1193 |
+
image_batch_size = image.shape[0]
|
| 1194 |
+
|
| 1195 |
+
if image_batch_size == 1:
|
| 1196 |
+
repeat_by = batch_size
|
| 1197 |
+
else:
|
| 1198 |
+
# image batch size is the same as prompt batch size
|
| 1199 |
+
repeat_by = num_images_per_prompt
|
| 1200 |
+
|
| 1201 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 1202 |
+
|
| 1203 |
+
image = image.to(device=device, dtype=dtype)
|
| 1204 |
+
|
| 1205 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 1206 |
+
image = torch.cat([image] * 2)
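# Note: in guess mode the control image is not duplicated because the ControlNet is only run on the conditional branch.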
|
| 1207 |
+
|
| 1208 |
+
return image
|
| 1209 |
+
|
| 1210 |
+
@torch.no_grad()
|
| 1211 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 1212 |
+
def __call__(
|
| 1213 |
+
self,
|
| 1214 |
+
prompt: Optional[Union[str, list[str]]] = None,
|
| 1215 |
+
prompt_2: Optional[Union[str, list[str]]] = None,
|
| 1216 |
+
image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
|
| 1217 |
+
mask_image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
|
| 1218 |
+
adapter_image: PipelineImageInput = None,
|
| 1219 |
+
control_image: PipelineImageInput = None,
|
| 1220 |
+
height: Optional[int] = None,
|
| 1221 |
+
width: Optional[int] = None,
|
| 1222 |
+
strength: float = 0.9999,
|
| 1223 |
+
num_inference_steps: int = 50,
|
| 1224 |
+
denoising_start: Optional[float] = None,
|
| 1225 |
+
denoising_end: Optional[float] = None,
|
| 1226 |
+
guidance_scale: float = 5.0,
|
| 1227 |
+
negative_prompt: Optional[Union[str, list[str]]] = None,
|
| 1228 |
+
negative_prompt_2: Optional[Union[str, list[str]]] = None,
|
| 1229 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1230 |
+
eta: float = 0.0,
|
| 1231 |
+
generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
|
| 1232 |
+
latents: Optional[Union[torch.FloatTensor]] = None,
|
| 1233 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1234 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1235 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1236 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1237 |
+
output_type: Optional[str] = "pil",
|
| 1238 |
+
return_dict: bool = True,
|
| 1239 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 1240 |
+
callback_steps: int = 1,
|
| 1241 |
+
cross_attention_kwargs: Optional[dict[str, Any]] = None,
|
| 1242 |
+
guidance_rescale: float = 0.0,
|
| 1243 |
+
original_size: Optional[tuple[int, int]] = None,
|
| 1244 |
+
crops_coords_top_left: Optional[tuple[int, int]] = (0, 0),
|
| 1245 |
+
target_size: Optional[tuple[int, int]] = None,
|
| 1246 |
+
adapter_conditioning_scale: Optional[Union[float, list[float]]] = 1.0,
|
| 1247 |
+
cond_tau: float = 1.0,
|
| 1248 |
+
aesthetic_score: float = 6.0,
|
| 1249 |
+
negative_aesthetic_score: float = 2.5,
|
| 1250 |
+
controlnet_conditioning_scale=1.0,
|
| 1251 |
+
guess_mode: bool = False,
|
| 1252 |
+
control_guidance_start=0.0,
|
| 1253 |
+
control_guidance_end=1.0,
|
| 1254 |
+
):
|
| 1255 |
+
r"""
|
| 1256 |
+
Function invoked when calling the pipeline for generation.
|
| 1257 |
+
|
| 1258 |
+
Args:
|
| 1259 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 1260 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
| 1261 |
+
instead.
|
| 1262 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 1263 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 1264 |
+
used in both text-encoders
|
| 1265 |
+
image (`PIL.Image.Image`):
|
| 1266 |
+
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
| 1267 |
+
be masked out with `mask_image` and repainted according to `prompt`.
|
| 1268 |
+
mask_image (`PIL.Image.Image`):
|
| 1269 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 1270 |
+
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
| 1271 |
+
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
| 1272 |
+
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 1273 |
+
adapter_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
|
| 1274 |
+
The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
|
| 1275 |
+
type is specified as `torch.FloatTensor`, it is passed to Adapter as is. `PIL.Image.Image` can also be
|
| 1276 |
+
accepted as an image. The control image is automatically resized to fit the output image.
|
| 1277 |
+
control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 1278 |
+
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 1279 |
+
The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
|
| 1280 |
+
specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
|
| 1281 |
+
accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
|
| 1282 |
+
and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
|
| 1283 |
+
`init`, images must be passed as a list such that each element of the list can be correctly batched for
|
| 1284 |
+
input to a single ControlNet.
|
| 1285 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 1286 |
+
The height in pixels of the generated image.
|
| 1287 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 1288 |
+
The width in pixels of the generated image.
|
| 1289 |
+
strength (`float`, *optional*, defaults to 0.9999):
|
| 1290 |
+
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
| 1291 |
+
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
| 1292 |
+
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
| 1293 |
+
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
| 1294 |
+
essentially ignores `image`.
|
| 1295 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1296 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1297 |
+
expense of slower inference.
|
| 1298 |
+
denoising_start (`float`, *optional*):
|
| 1299 |
+
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 1300 |
+
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
|
| 1301 |
+
it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
|
| 1302 |
+
strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
|
| 1303 |
+
is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
|
| 1304 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
|
| 1305 |
+
denoising_end (`float`, *optional*):
|
| 1306 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 1307 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 1308 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
| 1309 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
| 1310 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 1311 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
|
| 1312 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 1313 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 1314 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1315 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 1316 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 1317 |
+
usually at the expense of lower image quality.
|
| 1318 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1319 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 1320 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 1321 |
+
less than `1`).
|
| 1322 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 1323 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 1324 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 1325 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1326 |
+
The number of images to generate per prompt.
|
| 1327 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1328 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 1329 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1330 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1331 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1332 |
+
to make generation deterministic.
|
| 1333 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 1334 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 1335 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 1336 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 1337 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1338 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1339 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1340 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1341 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1342 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1343 |
+
argument.
|
| 1344 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1345 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 1346 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 1347 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 1348 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1349 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 1350 |
+
input argument.
|
| 1351 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1352 |
+
The output format of the generated image. Choose between
|
| 1353 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1354 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1355 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`]
|
| 1356 |
+
instead of a plain tuple.
|
| 1357 |
+
callback (`Callable`, *optional*):
|
| 1358 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1359 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 1360 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1361 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1362 |
+
called at every step.
|
| 1363 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1364 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1365 |
+
`self.processor` in
|
| 1366 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1367 |
+
guidance_rescale (`float`, *optional*, defaults to 0.7):
|
| 1368 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 1369 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
|
| 1370 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
| 1371 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 1372 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1373 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 1374 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 1375 |
+
explained in section 2.2 of
|
| 1376 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1377 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 1378 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 1379 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 1380 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1381 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1382 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1383 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 1384 |
+
not specified, it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 1385 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1386 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 1387 |
+
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the
|
| 1388 |
+
residual in the original UNet. If multiple ControlNets are specified in init, you can set the
|
| 1389 |
+
corresponding scale as a list.
|
| 1390 |
+
adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 1391 |
+
The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
|
| 1392 |
+
residual in the original unet. If multiple adapters are specified in init, you can set the
|
| 1393 |
+
corresponding scale as a list.
|
| 1394 |
+
aesthetic_score (`float`, *optional*, defaults to 6.0):
|
| 1395 |
+
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
|
| 1396 |
+
Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1397 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1398 |
+
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
|
| 1399 |
+
Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1400 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
|
| 1401 |
+
simulate an aesthetic score of the generated image by influencing the negative text condition.
|
| 1402 |
+
Examples:
|
| 1403 |
+
|
| 1404 |
+
Returns:
|
| 1405 |
+
[`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`:
|
| 1406 |
+
[`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a
|
| 1407 |
+
`tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 1408 |
+
"""
|
| 1409 |
+
# 0. Default height and width to unet
|
| 1410 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 1411 |
+
adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
|
| 1412 |
+
height, width = self._default_height_width(height, width, adapter_image)
|
| 1413 |
+
device = self._execution_device
|
| 1414 |
+
|
| 1415 |
+
if isinstance(adapter, MultiAdapter):
|
| 1416 |
+
adapter_input = []
|
| 1417 |
+
for one_image in adapter_image:
|
| 1418 |
+
one_image = _preprocess_adapter_image(one_image, height, width)
|
| 1419 |
+
one_image = one_image.to(device=device, dtype=adapter.dtype)
|
| 1420 |
+
adapter_input.append(one_image)
|
| 1421 |
+
else:
|
| 1422 |
+
adapter_input = _preprocess_adapter_image(adapter_image, height, width)
|
| 1423 |
+
adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
|
| 1424 |
+
|
| 1425 |
+
original_size = original_size or (height, width)
|
| 1426 |
+
target_size = target_size or (height, width)
|
| 1427 |
+
|
| 1428 |
+
# 0.1 align format for control guidance
|
| 1429 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 1430 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 1431 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 1432 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 1433 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 1434 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 1435 |
+
control_guidance_start, control_guidance_end = (
|
| 1436 |
+
mult * [control_guidance_start],
|
| 1437 |
+
mult * [control_guidance_end],
|
| 1438 |
+
)
|
| 1439 |
+
|
| 1440 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 1441 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 1442 |
+
if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
|
| 1443 |
+
adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.nets)
|
| 1444 |
+
|
| 1445 |
+
# 1. Check inputs. Raise error if not correct
|
| 1446 |
+
self.check_inputs(
|
| 1447 |
+
prompt,
|
| 1448 |
+
prompt_2,
|
| 1449 |
+
height,
|
| 1450 |
+
width,
|
| 1451 |
+
callback_steps,
|
| 1452 |
+
negative_prompt=negative_prompt,
|
| 1453 |
+
negative_prompt_2=negative_prompt_2,
|
| 1454 |
+
prompt_embeds=prompt_embeds,
|
| 1455 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1456 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1457 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1458 |
+
)
|
| 1459 |
+
|
| 1460 |
+
self.check_conditions(
|
| 1461 |
+
prompt,
|
| 1462 |
+
prompt_embeds,
|
| 1463 |
+
adapter_image,
|
| 1464 |
+
control_image,
|
| 1465 |
+
adapter_conditioning_scale,
|
| 1466 |
+
controlnet_conditioning_scale,
|
| 1467 |
+
control_guidance_start,
|
| 1468 |
+
control_guidance_end,
|
| 1469 |
+
)
|
| 1470 |
+
|
| 1471 |
+
# 2. Define call parameters
|
| 1472 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1473 |
+
batch_size = 1
|
| 1474 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1475 |
+
batch_size = len(prompt)
|
| 1476 |
+
else:
|
| 1477 |
+
batch_size = prompt_embeds.shape[0]
|
| 1478 |
+
|
| 1479 |
+
device = self._execution_device
|
| 1480 |
+
|
| 1481 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 1482 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 1483 |
+
# corresponds to doing no classifier free guidance.
|
| 1484 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 1485 |
+
|
| 1486 |
+
# 3. Encode input prompt
|
| 1487 |
+
(
|
| 1488 |
+
prompt_embeds,
|
| 1489 |
+
negative_prompt_embeds,
|
| 1490 |
+
pooled_prompt_embeds,
|
| 1491 |
+
negative_pooled_prompt_embeds,
|
| 1492 |
+
) = self.encode_prompt(
|
| 1493 |
+
prompt=prompt,
|
| 1494 |
+
prompt_2=prompt_2,
|
| 1495 |
+
device=device,
|
| 1496 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1497 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1498 |
+
negative_prompt=negative_prompt,
|
| 1499 |
+
negative_prompt_2=negative_prompt_2,
|
| 1500 |
+
prompt_embeds=prompt_embeds,
|
| 1501 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1502 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1503 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1504 |
+
)
|
| 1505 |
+
|
| 1506 |
+
# 4. set timesteps
|
| 1507 |
+
def denoising_value_valid(dnv):
|
| 1508 |
+
return isinstance(dnv, float) and 0 < dnv < 1
|
| 1509 |
+
|
| 1510 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1511 |
+
timesteps, num_inference_steps = self.get_timesteps(
|
| 1512 |
+
num_inference_steps,
|
| 1513 |
+
strength,
|
| 1514 |
+
device,
|
| 1515 |
+
denoising_start=denoising_start if denoising_value_valid(denoising_start) else None,
|
| 1516 |
+
)
|
| 1517 |
+
# check that number of inference steps is not < 1 - as this doesn't make sense
|
| 1518 |
+
if num_inference_steps < 1:
|
| 1519 |
+
raise ValueError(
|
| 1520 |
+
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
|
| 1521 |
+
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
|
| 1522 |
+
)
|
| 1523 |
+
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
|
| 1524 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 1525 |
+
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
|
| 1526 |
+
is_strength_max = strength == 1.0
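A rough standalone sketch (simplified from the usual img2img `get_timesteps` helper, ignoring `denoising_start` and the scheduler order) of how `strength` trims the schedule above: `strength=1.0` keeps every step and starts from pure noise, lower values start from a less noisy point.
```py
def trim_timesteps(timesteps, num_inference_steps, strength):
    # keep only the tail of the schedule proportional to `strength`
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

steps = list(range(1000, 0, -100))      # toy 10-step schedule: [1000, 900, ..., 100]
print(trim_timesteps(steps, 10, 1.0))   # ([1000, ..., 100], 10) -> start from pure noise
print(trim_timesteps(steps, 10, 0.3))   # ([300, 200, 100], 3)   -> start from t=300
```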
|
| 1527 |
+
|
| 1528 |
+
# 5. Preprocess mask and image - resizes image and mask w.r.t height and width
|
| 1529 |
+
mask, masked_image, init_image = prepare_mask_and_masked_image(
|
| 1530 |
+
image, mask_image, height, width, return_image=True
|
| 1531 |
+
)
|
| 1532 |
+
|
| 1533 |
+
# 6. Prepare latent variables
|
| 1534 |
+
num_channels_latents = self.vae.config.latent_channels
|
| 1535 |
+
num_channels_unet = self.unet.config.in_channels
|
| 1536 |
+
return_image_latents = num_channels_unet == 4
|
| 1537 |
+
|
| 1538 |
+
add_noise = denoising_start is None
|
| 1539 |
+
latents_outputs = self.prepare_latents(
|
| 1540 |
+
batch_size * num_images_per_prompt,
|
| 1541 |
+
num_channels_latents,
|
| 1542 |
+
height,
|
| 1543 |
+
width,
|
| 1544 |
+
prompt_embeds.dtype,
|
| 1545 |
+
device,
|
| 1546 |
+
generator,
|
| 1547 |
+
latents,
|
| 1548 |
+
image=init_image,
|
| 1549 |
+
timestep=latent_timestep,
|
| 1550 |
+
is_strength_max=is_strength_max,
|
| 1551 |
+
add_noise=add_noise,
|
| 1552 |
+
return_noise=True,
|
| 1553 |
+
return_image_latents=return_image_latents,
|
| 1554 |
+
)
|
| 1555 |
+
|
| 1556 |
+
if return_image_latents:
|
| 1557 |
+
latents, noise, image_latents = latents_outputs
|
| 1558 |
+
else:
|
| 1559 |
+
latents, noise = latents_outputs
|
| 1560 |
+
|
| 1561 |
+
# 7. Prepare mask latent variables
|
| 1562 |
+
mask, masked_image_latents = self.prepare_mask_latents(
|
| 1563 |
+
mask,
|
| 1564 |
+
masked_image,
|
| 1565 |
+
batch_size * num_images_per_prompt,
|
| 1566 |
+
height,
|
| 1567 |
+
width,
|
| 1568 |
+
prompt_embeds.dtype,
|
| 1569 |
+
device,
|
| 1570 |
+
generator,
|
| 1571 |
+
do_classifier_free_guidance,
|
| 1572 |
+
)
|
| 1573 |
+
|
| 1574 |
+
# 8. Check that sizes of mask, masked image and latents match
|
| 1575 |
+
if num_channels_unet == 9:
|
| 1576 |
+
# default case for runwayml/stable-diffusion-inpainting
|
| 1577 |
+
num_channels_mask = mask.shape[1]
|
| 1578 |
+
num_channels_masked_image = masked_image_latents.shape[1]
|
| 1579 |
+
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
|
| 1580 |
+
raise ValueError(
|
| 1581 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 1582 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 1583 |
+
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
| 1584 |
+
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
|
| 1585 |
+
" `pipeline.unet` or your `mask_image` or `image` input."
|
| 1586 |
+
)
|
| 1587 |
+
elif num_channels_unet != 4:
|
| 1588 |
+
raise ValueError(
|
| 1589 |
+
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
|
| 1590 |
+
)
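For reference, a toy shape check of the 9-channel layout that the validation above enforces (assumed channel counts for an inpainting-style UNet: 4 latent + 1 mask + 4 masked-image latent channels).
```py
import torch

latents = torch.randn(2, 4, 64, 64)                # denoising latents
mask = torch.randn(2, 1, 64, 64)                   # downsampled inpainting mask
masked_image_latents = torch.randn(2, 4, 64, 64)   # VAE-encoded masked image
unet_input = torch.cat([latents, mask, masked_image_latents], dim=1)
print(unet_input.shape)  # torch.Size([2, 9, 64, 64]) -> matches a 9-channel inpainting UNet
```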
|
| 1591 |
+
|
| 1592 |
+
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1593 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1594 |
+
|
| 1595 |
+
# 10. Prepare added time ids & embeddings & adapter features
|
| 1596 |
+
if isinstance(adapter, MultiAdapter):
|
| 1597 |
+
adapter_state = adapter(adapter_input, adapter_conditioning_scale)
|
| 1598 |
+
for k, v in enumerate(adapter_state):
|
| 1599 |
+
adapter_state[k] = v
|
| 1600 |
+
else:
|
| 1601 |
+
adapter_state = adapter(adapter_input)
|
| 1602 |
+
for k, v in enumerate(adapter_state):
|
| 1603 |
+
adapter_state[k] = v * adapter_conditioning_scale
|
| 1604 |
+
if num_images_per_prompt > 1:
|
| 1605 |
+
for k, v in enumerate(adapter_state):
|
| 1606 |
+
adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
|
| 1607 |
+
if do_classifier_free_guidance:
|
| 1608 |
+
for k, v in enumerate(adapter_state):
|
| 1609 |
+
adapter_state[k] = torch.cat([v] * 2, dim=0)
|
| 1610 |
+
|
| 1611 |
+
# 10.2 Prepare control images
|
| 1612 |
+
if isinstance(controlnet, ControlNetModel):
|
| 1613 |
+
control_image = self.prepare_control_image(
|
| 1614 |
+
image=control_image,
|
| 1615 |
+
width=width,
|
| 1616 |
+
height=height,
|
| 1617 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1618 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1619 |
+
device=device,
|
| 1620 |
+
dtype=controlnet.dtype,
|
| 1621 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1622 |
+
guess_mode=guess_mode,
|
| 1623 |
+
)
|
| 1624 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 1625 |
+
control_images = []
|
| 1626 |
+
|
| 1627 |
+
for control_image_ in control_image:
|
| 1628 |
+
control_image_ = self.prepare_control_image(
|
| 1629 |
+
image=control_image_,
|
| 1630 |
+
width=width,
|
| 1631 |
+
height=height,
|
| 1632 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1633 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1634 |
+
device=device,
|
| 1635 |
+
dtype=controlnet.dtype,
|
| 1636 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1637 |
+
guess_mode=guess_mode,
|
| 1638 |
+
)
|
| 1639 |
+
|
| 1640 |
+
control_images.append(control_image_)
|
| 1641 |
+
|
| 1642 |
+
control_image = control_images
|
| 1643 |
+
else:
|
| 1644 |
+
raise ValueError(f"{controlnet.__class__} is not supported.")
|
| 1645 |
+
|
| 1646 |
+
# 8.2 Create tensor stating which controlnets to keep
|
| 1647 |
+
controlnet_keep = []
|
| 1648 |
+
for i in range(len(timesteps)):
|
| 1649 |
+
keeps = [
|
| 1650 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 1651 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 1652 |
+
]
|
| 1653 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 1654 |
+
controlnet_keep.append(keeps)
|
| 1655 |
+
else:
|
| 1656 |
+
controlnet_keep.append(keeps[0])
|
| 1657 |
+
# ----------------------------------------------------------------
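A small standalone sketch, with made-up numbers, of the `controlnet_keep` schedule built above: a ControlNet contributes only while the current step fraction lies inside `[control_guidance_start, control_guidance_end]`.
```py
def keep_schedule(num_steps, start, end):
    # 1.0 while the step fraction is inside [start, end], 0.0 otherwise
    return [1.0 - float(i / num_steps < start or (i + 1) / num_steps > end) for i in range(num_steps)]

print(keep_schedule(10, 0.0, 1.0))  # [1.0] * 10 -> ControlNet active for every step
print(keep_schedule(10, 0.2, 0.6))  # active only for steps 2..5
```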
|
| 1658 |
+
|
| 1659 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1660 |
+
if self.text_encoder_2 is None:
|
| 1661 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1662 |
+
else:
|
| 1663 |
+
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
|
| 1664 |
+
|
| 1665 |
+
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
|
| 1666 |
+
original_size,
|
| 1667 |
+
crops_coords_top_left,
|
| 1668 |
+
target_size,
|
| 1669 |
+
aesthetic_score,
|
| 1670 |
+
negative_aesthetic_score,
|
| 1671 |
+
dtype=prompt_embeds.dtype,
|
| 1672 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1673 |
+
)
|
| 1674 |
+
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
|
| 1675 |
+
|
| 1676 |
+
if do_classifier_free_guidance:
|
| 1677 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1678 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1679 |
+
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
|
| 1680 |
+
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
|
| 1681 |
+
|
| 1682 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1683 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1684 |
+
add_time_ids = add_time_ids.to(device)
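Illustrative only (assumed layout): SDXL's `add_time_ids` micro-conditioning packs `original_size`, `crops_coords_top_left` and, in this aesthetic-score variant, `aesthetic_score` into one small vector per sample that is embedded alongside the pooled text embedding.
```py
import torch

original_size, crops_coords_top_left, aesthetic_score = (1024, 1024), (0, 0), 6.0
add_time_ids = torch.tensor([list(original_size + crops_coords_top_left + (aesthetic_score,))])
print(add_time_ids)  # -> [[1024., 1024., 0., 0., 6.]]
```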
|
| 1685 |
+
|
| 1686 |
+
# 11. Denoising loop
|
| 1687 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1688 |
+
|
| 1689 |
+
# 11.1 Apply denoising_end
|
| 1690 |
+
if (
|
| 1691 |
+
denoising_end is not None
|
| 1692 |
+
and denoising_start is not None
|
| 1693 |
+
and denoising_value_valid(denoising_end)
|
| 1694 |
+
and denoising_value_valid(denoising_start)
|
| 1695 |
+
and denoising_start >= denoising_end
|
| 1696 |
+
):
|
| 1697 |
+
raise ValueError(
|
| 1698 |
+
f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
|
| 1699 |
+
+ f" {denoising_end} when using type float."
|
| 1700 |
+
)
|
| 1701 |
+
elif denoising_end is not None and denoising_value_valid(denoising_end):
|
| 1702 |
+
discrete_timestep_cutoff = int(
|
| 1703 |
+
round(
|
| 1704 |
+
self.scheduler.config.num_train_timesteps
|
| 1705 |
+
- (denoising_end * self.scheduler.config.num_train_timesteps)
|
| 1706 |
+
)
|
| 1707 |
+
)
|
| 1708 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 1709 |
+
timesteps = timesteps[:num_inference_steps]
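A toy walk-through of the `denoising_end` cutoff computed above, with made-up schedule values: with 1000 training timesteps and `denoising_end=0.8`, every timestep below 200 is dropped, leaving the final 20% of the denoising for a refiner pipeline.
```py
num_train_timesteps = 1000
denoising_end = 0.8
timesteps = list(range(950, 0, -50))  # toy 19-step schedule: 950, 900, ..., 50
cutoff = int(round(num_train_timesteps - denoising_end * num_train_timesteps))
kept = [t for t in timesteps if t >= cutoff]
print(cutoff, len(kept), kept[-1])  # 200 16 200
```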
|
| 1710 |
+
|
| 1711 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1712 |
+
for i, t in enumerate(timesteps):
|
| 1713 |
+
# expand the latents if we are doing classifier free guidance
|
| 1714 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1715 |
+
|
| 1716 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1717 |
+
|
| 1718 |
+
if num_channels_unet == 9:
|
| 1719 |
+
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
|
| 1720 |
+
|
| 1721 |
+
# predict the noise residual
|
| 1722 |
+
added_cond_kwargs = {
|
| 1723 |
+
"text_embeds": add_text_embeds,
|
| 1724 |
+
"time_ids": add_time_ids,
|
| 1725 |
+
}
|
| 1726 |
+
|
| 1727 |
+
if i < int(num_inference_steps * cond_tau):
|
| 1728 |
+
down_block_additional_residuals = [state.clone() for state in adapter_state]
|
| 1729 |
+
else:
|
| 1730 |
+
down_block_additional_residuals = None
|
| 1731 |
+
|
| 1732 |
+
# ----------- ControlNet
|
| 1733 |
+
|
| 1734 |
+
# expand the latents if we are doing classifier free guidance
|
| 1735 |
+
latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1736 |
+
|
| 1737 |
+
# concat latents, mask, masked_image_latents in the channel dimension
|
| 1738 |
+
latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
|
| 1739 |
+
|
| 1740 |
+
# controlnet(s) inference
|
| 1741 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1742 |
+
# Infer ControlNet only for the conditional batch.
|
| 1743 |
+
control_model_input = latents
|
| 1744 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1745 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1746 |
+
controlnet_added_cond_kwargs = {
|
| 1747 |
+
"text_embeds": add_text_embeds.chunk(2)[1],
|
| 1748 |
+
"time_ids": add_time_ids.chunk(2)[1],
|
| 1749 |
+
}
|
| 1750 |
+
else:
|
| 1751 |
+
control_model_input = latent_model_input_controlnet
|
| 1752 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1753 |
+
controlnet_added_cond_kwargs = added_cond_kwargs
|
| 1754 |
+
|
| 1755 |
+
if isinstance(controlnet_keep[i], list):
|
| 1756 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1757 |
+
else:
|
| 1758 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1759 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1760 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1761 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1762 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1763 |
+
control_model_input,
|
| 1764 |
+
t,
|
| 1765 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1766 |
+
controlnet_cond=control_image,
|
| 1767 |
+
conditioning_scale=cond_scale,
|
| 1768 |
+
guess_mode=guess_mode,
|
| 1769 |
+
added_cond_kwargs=controlnet_added_cond_kwargs,
|
| 1770 |
+
return_dict=False,
|
| 1771 |
+
)
|
| 1772 |
+
|
| 1773 |
+
noise_pred = self.unet(
|
| 1774 |
+
latent_model_input,
|
| 1775 |
+
t,
|
| 1776 |
+
encoder_hidden_states=prompt_embeds,
|
| 1777 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1778 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1779 |
+
return_dict=False,
|
| 1780 |
+
down_intrablock_additional_residuals=down_block_additional_residuals, # t2iadapter
|
| 1781 |
+
down_block_additional_residuals=down_block_res_samples, # controlnet
|
| 1782 |
+
mid_block_additional_residual=mid_block_res_sample, # controlnet
|
| 1783 |
+
)[0]
|
| 1784 |
+
|
| 1785 |
+
# perform guidance
|
| 1786 |
+
if do_classifier_free_guidance:
|
| 1787 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1788 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1789 |
+
|
| 1790 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1791 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1792 |
+
noise_pred = rescale_noise_cfg(
|
| 1793 |
+
noise_pred,
|
| 1794 |
+
noise_pred_text,
|
| 1795 |
+
guidance_rescale=guidance_rescale,
|
| 1796 |
+
)
|
| 1797 |
+
|
| 1798 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1799 |
+
latents = self.scheduler.step(
|
| 1800 |
+
noise_pred,
|
| 1801 |
+
t,
|
| 1802 |
+
latents,
|
| 1803 |
+
**extra_step_kwargs,
|
| 1804 |
+
return_dict=False,
|
| 1805 |
+
)[0]
|
| 1806 |
+
|
| 1807 |
+
if num_channels_unet == 4:
|
| 1808 |
+
init_latents_proper = image_latents
|
| 1809 |
+
if do_classifier_free_guidance:
|
| 1810 |
+
init_mask, _ = mask.chunk(2)
|
| 1811 |
+
else:
|
| 1812 |
+
init_mask = mask
|
| 1813 |
+
|
| 1814 |
+
if i < len(timesteps) - 1:
|
| 1815 |
+
noise_timestep = timesteps[i + 1]
|
| 1816 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 1817 |
+
init_latents_proper,
|
| 1818 |
+
noise,
|
| 1819 |
+
torch.tensor([noise_timestep]),
|
| 1820 |
+
)
|
| 1821 |
+
|
| 1822 |
+
latents = (1 - init_mask) * init_latents_proper + init_mask * latents
|
| 1823 |
+
|
| 1824 |
+
# call the callback, if provided
|
| 1825 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1826 |
+
progress_bar.update()
|
| 1827 |
+
if callback is not None and i % callback_steps == 0:
|
| 1828 |
+
callback(i, t, latents)
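A toy illustration (outside the pipeline) of the masked blend applied a few lines above for 4-channel UNets: the known region is taken from the re-noised original latents and only the masked hole keeps the model's update.
```py
import torch

init_latents_proper = torch.zeros(1, 4, 8, 8)   # stands in for the re-noised original image latents
latents = torch.ones(1, 4, 8, 8)                # stands in for the freshly denoised latents
init_mask = torch.zeros(1, 1, 8, 8)
init_mask[..., 2:6, 2:6] = 1.0                  # 1 = region to inpaint
blended = (1 - init_mask) * init_latents_proper + init_mask * latents
print(blended[0, 0, 0, 0].item(), blended[0, 0, 4, 4].item())  # 0.0 (kept)  1.0 (inpainted)
```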
|
| 1829 |
+
|
| 1830 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1831 |
+
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
|
| 1832 |
+
self.upcast_vae()
|
| 1833 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1834 |
+
|
| 1835 |
+
if output_type != "latent":
|
| 1836 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1837 |
+
else:
|
| 1838 |
+
image = latents
|
| 1839 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
| 1840 |
+
|
| 1841 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1842 |
+
|
| 1843 |
+
# Offload last model to CPU
|
| 1844 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1845 |
+
self.final_offload_hook.offload()
|
| 1846 |
+
|
| 1847 |
+
if not return_dict:
|
| 1848 |
+
return (image,)
|
| 1849 |
+
|
| 1850 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
v0.27.0/pipeline_stable_diffusion_xl_instantid.py
ADDED
|
@@ -0,0 +1,1061 @@
|
| 1 |
+
# Copyright 2024 The InstantX Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
import math
|
| 17 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 18 |
+
|
| 19 |
+
import cv2
|
| 20 |
+
import numpy as np
|
| 21 |
+
import PIL.Image
|
| 22 |
+
import torch
|
| 23 |
+
import torch.nn as nn
|
| 24 |
+
|
| 25 |
+
from diffusers import StableDiffusionXLControlNetPipeline
|
| 26 |
+
from diffusers.image_processor import PipelineImageInput
|
| 27 |
+
from diffusers.models import ControlNetModel
|
| 28 |
+
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
| 29 |
+
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
|
| 30 |
+
from diffusers.utils import (
|
| 31 |
+
deprecate,
|
| 32 |
+
logging,
|
| 33 |
+
replace_example_docstring,
|
| 34 |
+
)
|
| 35 |
+
from diffusers.utils.import_utils import is_xformers_available
|
| 36 |
+
from diffusers.utils.torch_utils import is_compiled_module, is_torch_version
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
try:
|
| 40 |
+
import xformers
|
| 41 |
+
import xformers.ops
|
| 42 |
+
|
| 43 |
+
xformers_available = True
|
| 44 |
+
except Exception:
|
| 45 |
+
xformers_available = False
|
| 46 |
+
|
| 47 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def FeedForward(dim, mult=4):
|
| 51 |
+
inner_dim = int(dim * mult)
|
| 52 |
+
return nn.Sequential(
|
| 53 |
+
nn.LayerNorm(dim),
|
| 54 |
+
nn.Linear(dim, inner_dim, bias=False),
|
| 55 |
+
nn.GELU(),
|
| 56 |
+
nn.Linear(inner_dim, dim, bias=False),
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def reshape_tensor(x, heads):
|
| 61 |
+
bs, length, width = x.shape
|
| 62 |
+
# (bs, length, width) --> (bs, length, n_heads, dim_per_head)
|
| 63 |
+
x = x.view(bs, length, heads, -1)
|
| 64 |
+
# (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
|
| 65 |
+
x = x.transpose(1, 2)
|
| 66 |
+
# (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
|
| 67 |
+
x = x.reshape(bs, heads, length, -1)
|
| 68 |
+
return x
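A quick, illustrative shape check for `reshape_tensor` above: it splits the channel dimension into attention heads and moves the head axis next to the batch axis.
```py
import torch

x = torch.randn(2, 5, 64)                 # (batch, length, heads * dim_per_head)
print(reshape_tensor(x, heads=8).shape)   # torch.Size([2, 8, 5, 8])
```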
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class PerceiverAttention(nn.Module):
|
| 72 |
+
def __init__(self, *, dim, dim_head=64, heads=8):
|
| 73 |
+
super().__init__()
|
| 74 |
+
self.scale = dim_head**-0.5
|
| 75 |
+
self.dim_head = dim_head
|
| 76 |
+
self.heads = heads
|
| 77 |
+
inner_dim = dim_head * heads
|
| 78 |
+
|
| 79 |
+
self.norm1 = nn.LayerNorm(dim)
|
| 80 |
+
self.norm2 = nn.LayerNorm(dim)
|
| 81 |
+
|
| 82 |
+
self.to_q = nn.Linear(dim, inner_dim, bias=False)
|
| 83 |
+
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
|
| 84 |
+
self.to_out = nn.Linear(inner_dim, dim, bias=False)
|
| 85 |
+
|
| 86 |
+
def forward(self, x, latents):
|
| 87 |
+
"""
|
| 88 |
+
Args:
|
| 89 |
+
x (torch.Tensor): image features
|
| 90 |
+
shape (b, n1, D)
|
| 91 |
+
latents (torch.Tensor): latent features
|
| 92 |
+
shape (b, n2, D)
|
| 93 |
+
"""
|
| 94 |
+
x = self.norm1(x)
|
| 95 |
+
latents = self.norm2(latents)
|
| 96 |
+
|
| 97 |
+
b, l, _ = latents.shape
|
| 98 |
+
|
| 99 |
+
q = self.to_q(latents)
|
| 100 |
+
kv_input = torch.cat((x, latents), dim=-2)
|
| 101 |
+
k, v = self.to_kv(kv_input).chunk(2, dim=-1)
|
| 102 |
+
|
| 103 |
+
q = reshape_tensor(q, self.heads)
|
| 104 |
+
k = reshape_tensor(k, self.heads)
|
| 105 |
+
v = reshape_tensor(v, self.heads)
|
| 106 |
+
|
| 107 |
+
# attention
|
| 108 |
+
scale = 1 / math.sqrt(math.sqrt(self.dim_head))
|
| 109 |
+
weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
|
| 110 |
+
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
|
| 111 |
+
out = weight @ v
|
| 112 |
+
|
| 113 |
+
out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
|
| 114 |
+
|
| 115 |
+
return self.to_out(out)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class Resampler(nn.Module):
|
| 119 |
+
def __init__(
|
| 120 |
+
self,
|
| 121 |
+
dim=1024,
|
| 122 |
+
depth=8,
|
| 123 |
+
dim_head=64,
|
| 124 |
+
heads=16,
|
| 125 |
+
num_queries=8,
|
| 126 |
+
embedding_dim=768,
|
| 127 |
+
output_dim=1024,
|
| 128 |
+
ff_mult=4,
|
| 129 |
+
):
|
| 130 |
+
super().__init__()
|
| 131 |
+
|
| 132 |
+
self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
|
| 133 |
+
|
| 134 |
+
self.proj_in = nn.Linear(embedding_dim, dim)
|
| 135 |
+
|
| 136 |
+
self.proj_out = nn.Linear(dim, output_dim)
|
| 137 |
+
self.norm_out = nn.LayerNorm(output_dim)
|
| 138 |
+
|
| 139 |
+
self.layers = nn.ModuleList([])
|
| 140 |
+
for _ in range(depth):
|
| 141 |
+
self.layers.append(
|
| 142 |
+
nn.ModuleList(
|
| 143 |
+
[
|
| 144 |
+
PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
|
| 145 |
+
FeedForward(dim=dim, mult=ff_mult),
|
| 146 |
+
]
|
| 147 |
+
)
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
def forward(self, x):
|
| 151 |
+
latents = self.latents.repeat(x.size(0), 1, 1)
|
| 152 |
+
x = self.proj_in(x)
|
| 153 |
+
|
| 154 |
+
for attn, ff in self.layers:
|
| 155 |
+
latents = attn(x, latents) + latents
|
| 156 |
+
latents = ff(latents) + latents
|
| 157 |
+
|
| 158 |
+
latents = self.proj_out(latents)
|
| 159 |
+
return self.norm_out(latents)
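A hedged usage sketch for the `Resampler` above: it maps a variable-length image-embedding sequence to a fixed number of query tokens in the UNet's cross-attention width. The dimensions below are toy values, not InstantID's actual configuration.
```py
import torch

resampler = Resampler(dim=64, depth=2, dim_head=16, heads=4, num_queries=16, embedding_dim=32, output_dim=128)
image_embeds = torch.randn(2, 1, 32)   # (batch, sequence, embedding_dim)
tokens = resampler(image_embeds)
print(tokens.shape)                    # torch.Size([2, 16, 128]) -> 16 query tokens per sample
```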
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class AttnProcessor(nn.Module):
|
| 163 |
+
r"""
|
| 164 |
+
Default processor for performing attention-related computations.
|
| 165 |
+
"""
|
| 166 |
+
|
| 167 |
+
def __init__(
|
| 168 |
+
self,
|
| 169 |
+
hidden_size=None,
|
| 170 |
+
cross_attention_dim=None,
|
| 171 |
+
):
|
| 172 |
+
super().__init__()
|
| 173 |
+
|
| 174 |
+
def __call__(
|
| 175 |
+
self,
|
| 176 |
+
attn,
|
| 177 |
+
hidden_states,
|
| 178 |
+
encoder_hidden_states=None,
|
| 179 |
+
attention_mask=None,
|
| 180 |
+
temb=None,
|
| 181 |
+
):
|
| 182 |
+
residual = hidden_states
|
| 183 |
+
|
| 184 |
+
if attn.spatial_norm is not None:
|
| 185 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 186 |
+
|
| 187 |
+
input_ndim = hidden_states.ndim
|
| 188 |
+
|
| 189 |
+
if input_ndim == 4:
|
| 190 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 191 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 192 |
+
|
| 193 |
+
batch_size, sequence_length, _ = (
|
| 194 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 195 |
+
)
|
| 196 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 197 |
+
|
| 198 |
+
if attn.group_norm is not None:
|
| 199 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 200 |
+
|
| 201 |
+
query = attn.to_q(hidden_states)
|
| 202 |
+
|
| 203 |
+
if encoder_hidden_states is None:
|
| 204 |
+
encoder_hidden_states = hidden_states
|
| 205 |
+
elif attn.norm_cross:
|
| 206 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 207 |
+
|
| 208 |
+
key = attn.to_k(encoder_hidden_states)
|
| 209 |
+
value = attn.to_v(encoder_hidden_states)
|
| 210 |
+
|
| 211 |
+
query = attn.head_to_batch_dim(query)
|
| 212 |
+
key = attn.head_to_batch_dim(key)
|
| 213 |
+
value = attn.head_to_batch_dim(value)
|
| 214 |
+
|
| 215 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
| 216 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 217 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 218 |
+
|
| 219 |
+
# linear proj
|
| 220 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 221 |
+
# dropout
|
| 222 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 223 |
+
|
| 224 |
+
if input_ndim == 4:
|
| 225 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 226 |
+
|
| 227 |
+
if attn.residual_connection:
|
| 228 |
+
hidden_states = hidden_states + residual
|
| 229 |
+
|
| 230 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 231 |
+
|
| 232 |
+
return hidden_states
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
class IPAttnProcessor(nn.Module):
|
| 236 |
+
r"""
|
| 237 |
+
Attention processor for IP-Adapter.
|
| 238 |
+
Args:
|
| 239 |
+
hidden_size (`int`):
|
| 240 |
+
The hidden size of the attention layer.
|
| 241 |
+
cross_attention_dim (`int`):
|
| 242 |
+
The number of channels in the `encoder_hidden_states`.
|
| 243 |
+
scale (`float`, defaults to 1.0):
|
| 244 |
+
the weight scale of image prompt.
|
| 245 |
+
num_tokens (`int`, defaults to 4; when using ip_adapter_plus it should be 16):
|
| 246 |
+
The context length of the image features.
|
| 247 |
+
"""
|
| 248 |
+
|
| 249 |
+
def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
|
| 250 |
+
super().__init__()
|
| 251 |
+
|
| 252 |
+
self.hidden_size = hidden_size
|
| 253 |
+
self.cross_attention_dim = cross_attention_dim
|
| 254 |
+
self.scale = scale
|
| 255 |
+
self.num_tokens = num_tokens
|
| 256 |
+
|
| 257 |
+
self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
|
| 258 |
+
self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
|
| 259 |
+
|
| 260 |
+
def __call__(
|
| 261 |
+
self,
|
| 262 |
+
attn,
|
| 263 |
+
hidden_states,
|
| 264 |
+
encoder_hidden_states=None,
|
| 265 |
+
attention_mask=None,
|
| 266 |
+
temb=None,
|
| 267 |
+
):
|
| 268 |
+
residual = hidden_states
|
| 269 |
+
|
| 270 |
+
if attn.spatial_norm is not None:
|
| 271 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 272 |
+
|
| 273 |
+
input_ndim = hidden_states.ndim
|
| 274 |
+
|
| 275 |
+
if input_ndim == 4:
|
| 276 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 277 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 278 |
+
|
| 279 |
+
batch_size, sequence_length, _ = (
|
| 280 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 281 |
+
)
|
| 282 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 283 |
+
|
| 284 |
+
if attn.group_norm is not None:
|
| 285 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 286 |
+
|
| 287 |
+
query = attn.to_q(hidden_states)
|
| 288 |
+
|
| 289 |
+
if encoder_hidden_states is None:
|
| 290 |
+
encoder_hidden_states = hidden_states
|
| 291 |
+
else:
|
| 292 |
+
# get encoder_hidden_states, ip_hidden_states
|
| 293 |
+
end_pos = encoder_hidden_states.shape[1] - self.num_tokens
|
| 294 |
+
encoder_hidden_states, ip_hidden_states = (
|
| 295 |
+
encoder_hidden_states[:, :end_pos, :],
|
| 296 |
+
encoder_hidden_states[:, end_pos:, :],
|
| 297 |
+
)
|
| 298 |
+
if attn.norm_cross:
|
| 299 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 300 |
+
|
| 301 |
+
key = attn.to_k(encoder_hidden_states)
|
| 302 |
+
value = attn.to_v(encoder_hidden_states)
|
| 303 |
+
|
| 304 |
+
query = attn.head_to_batch_dim(query)
|
| 305 |
+
key = attn.head_to_batch_dim(key)
|
| 306 |
+
value = attn.head_to_batch_dim(value)
|
| 307 |
+
|
| 308 |
+
if xformers_available:
|
| 309 |
+
hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
|
| 310 |
+
else:
|
| 311 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
| 312 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 313 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 314 |
+
|
| 315 |
+
# for ip-adapter
|
| 316 |
+
ip_key = self.to_k_ip(ip_hidden_states)
|
| 317 |
+
ip_value = self.to_v_ip(ip_hidden_states)
|
| 318 |
+
|
| 319 |
+
ip_key = attn.head_to_batch_dim(ip_key)
|
| 320 |
+
ip_value = attn.head_to_batch_dim(ip_value)
|
| 321 |
+
|
| 322 |
+
if xformers_available:
|
| 323 |
+
ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None)
|
| 324 |
+
else:
|
| 325 |
+
ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
|
| 326 |
+
ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
|
| 327 |
+
ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
|
| 328 |
+
|
| 329 |
+
hidden_states = hidden_states + self.scale * ip_hidden_states
|
| 330 |
+
|
| 331 |
+
# linear proj
|
| 332 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 333 |
+
# dropout
|
| 334 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 335 |
+
|
| 336 |
+
if input_ndim == 4:
|
| 337 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 338 |
+
|
| 339 |
+
if attn.residual_connection:
|
| 340 |
+
hidden_states = hidden_states + residual
|
| 341 |
+
|
| 342 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 343 |
+
|
| 344 |
+
return hidden_states
|
| 345 |
+
|
| 346 |
+
def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
|
| 347 |
+
# TODO attention_mask
|
| 348 |
+
query = query.contiguous()
|
| 349 |
+
key = key.contiguous()
|
| 350 |
+
value = value.contiguous()
|
| 351 |
+
hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
|
| 352 |
+
return hidden_states
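A minimal sketch, with toy tensors, of the split `IPAttnProcessor.__call__` performs above: the last `num_tokens` entries of `encoder_hidden_states` are treated as image-prompt tokens, attended to separately, and merged back with weight `scale`.
```py
import torch

num_tokens, scale = 16, 0.8
encoder_hidden_states = torch.randn(2, 77 + num_tokens, 2048)  # text tokens followed by image tokens
end_pos = encoder_hidden_states.shape[1] - num_tokens
text_tokens = encoder_hidden_states[:, :end_pos, :]
ip_tokens = encoder_hidden_states[:, end_pos:, :]
print(text_tokens.shape, ip_tokens.shape)  # torch.Size([2, 77, 2048]) torch.Size([2, 16, 2048])
# final hidden states ~ attn(text_tokens) + scale * attn(ip_tokens)   (schematic)
```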
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
EXAMPLE_DOC_STRING = """
|
| 356 |
+
Examples:
|
| 357 |
+
```py
|
| 358 |
+
>>> # !pip install opencv-python transformers accelerate insightface
|
| 359 |
+
>>> import diffusers
|
| 360 |
+
>>> from diffusers.utils import load_image
|
| 361 |
+
>>> from diffusers.models import ControlNetModel
|
| 362 |
+
|
| 363 |
+
>>> import cv2
|
| 364 |
+
>>> import torch
|
| 365 |
+
>>> import numpy as np
|
| 366 |
+
>>> from PIL import Image
|
| 367 |
+
|
| 368 |
+
>>> from insightface.app import FaceAnalysis
|
| 369 |
+
>>> from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps
|
| 370 |
+
|
| 371 |
+
>>> # download 'antelopev2' under ./models
|
| 372 |
+
>>> app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
|
| 373 |
+
>>> app.prepare(ctx_id=0, det_size=(640, 640))
|
| 374 |
+
|
| 375 |
+
>>> # download models under ./checkpoints
|
| 376 |
+
>>> face_adapter = f'./checkpoints/ip-adapter.bin'
|
| 377 |
+
>>> controlnet_path = f'./checkpoints/ControlNetModel'
|
| 378 |
+
|
| 379 |
+
>>> # load IdentityNet
|
| 380 |
+
>>> controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
|
| 381 |
+
|
| 382 |
+
>>> pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
|
| 383 |
+
... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
|
| 384 |
+
... )
|
| 385 |
+
>>> pipe.cuda()
|
| 386 |
+
|
| 387 |
+
>>> # load adapter
|
| 388 |
+
>>> pipe.load_ip_adapter_instantid(face_adapter)
|
| 389 |
+
|
| 390 |
+
>>> prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality"
|
| 391 |
+
>>> negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured"
|
| 392 |
+
|
| 393 |
+
>>> # load an image
|
| 394 |
+
>>> face_image = load_image("your-example.jpg")
|
| 395 |
+
|
| 396 |
+
>>> face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))[-1]
|
| 397 |
+
>>> face_emb = face_info['embedding']
|
| 398 |
+
>>> face_kps = draw_kps(face_image, face_info['kps'])
|
| 399 |
+
|
| 400 |
+
>>> pipe.set_ip_adapter_scale(0.8)
|
| 401 |
+
|
| 402 |
+
>>> # generate image
|
| 403 |
+
>>> image = pipe(
|
| 404 |
+
... prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8
|
| 405 |
+
... ).images[0]
|
| 406 |
+
```
|
| 407 |
+
"""
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
|
| 411 |
+
stickwidth = 4
|
| 412 |
+
limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
|
| 413 |
+
kps = np.array(kps)
|
| 414 |
+
|
| 415 |
+
w, h = image_pil.size
|
| 416 |
+
out_img = np.zeros([h, w, 3])
|
| 417 |
+
|
| 418 |
+
for i in range(len(limbSeq)):
|
| 419 |
+
index = limbSeq[i]
|
| 420 |
+
color = color_list[index[0]]
|
| 421 |
+
|
| 422 |
+
x = kps[index][:, 0]
|
| 423 |
+
y = kps[index][:, 1]
|
| 424 |
+
length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
|
| 425 |
+
angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
|
| 426 |
+
polygon = cv2.ellipse2Poly(
|
| 427 |
+
(int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1
|
| 428 |
+
)
|
| 429 |
+
out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
|
| 430 |
+
out_img = (out_img * 0.6).astype(np.uint8)
|
| 431 |
+
|
| 432 |
+
for idx_kp, kp in enumerate(kps):
|
| 433 |
+
color = color_list[idx_kp]
|
| 434 |
+
x, y = kp
|
| 435 |
+
out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
|
| 436 |
+
|
| 437 |
+
out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
|
| 438 |
+
return out_img_pil
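A hedged usage sketch for `draw_kps` above: it rasterises five facial keypoints into the conditioning image InstantID's ControlNet expects. The keypoint coordinates below are made up.
```py
import PIL.Image

blank = PIL.Image.new("RGB", (640, 640))
fake_kps = [(200, 260), (440, 260), (320, 360), (240, 480), (400, 480)]  # eyes, nose, mouth corners
kps_image = draw_kps(blank, fake_kps)
print(kps_image.size)  # (640, 640)
```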
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
|
| 442 |
+
def cuda(self, dtype=torch.float16, use_xformers=False):
|
| 443 |
+
self.to("cuda", dtype)
|
| 444 |
+
|
| 445 |
+
if hasattr(self, "image_proj_model"):
|
| 446 |
+
self.image_proj_model.to(self.unet.device).to(self.unet.dtype)
|
| 447 |
+
|
| 448 |
+
if use_xformers:
|
| 449 |
+
if is_xformers_available():
|
| 450 |
+
import xformers
|
| 451 |
+
from packaging import version
|
| 452 |
+
|
| 453 |
+
xformers_version = version.parse(xformers.__version__)
|
| 454 |
+
if xformers_version == version.parse("0.0.16"):
|
| 455 |
+
logger.warning(
|
| 456 |
+
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
|
| 457 |
+
)
|
| 458 |
+
self.enable_xformers_memory_efficient_attention()
|
| 459 |
+
else:
|
| 460 |
+
raise ValueError("xformers is not available. Make sure it is installed correctly")
|
| 461 |
+
|
| 462 |
+
def load_ip_adapter_instantid(self, model_ckpt, image_emb_dim=512, num_tokens=16, scale=0.5):
|
| 463 |
+
self.set_image_proj_model(model_ckpt, image_emb_dim, num_tokens)
|
| 464 |
+
self.set_ip_adapter(model_ckpt, num_tokens, scale)
|
| 465 |
+
|
| 466 |
+
def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16):
|
| 467 |
+
image_proj_model = Resampler(
|
| 468 |
+
dim=1280,
|
| 469 |
+
depth=4,
|
| 470 |
+
dim_head=64,
|
| 471 |
+
heads=20,
|
| 472 |
+
num_queries=num_tokens,
|
| 473 |
+
embedding_dim=image_emb_dim,
|
| 474 |
+
output_dim=self.unet.config.cross_attention_dim,
|
| 475 |
+
ff_mult=4,
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
image_proj_model.eval()
|
| 479 |
+
|
| 480 |
+
self.image_proj_model = image_proj_model.to(self.device, dtype=self.dtype)
|
| 481 |
+
state_dict = torch.load(model_ckpt, map_location="cpu")
|
| 482 |
+
if "image_proj" in state_dict:
|
| 483 |
+
state_dict = state_dict["image_proj"]
|
| 484 |
+
self.image_proj_model.load_state_dict(state_dict)
|
| 485 |
+
|
| 486 |
+
self.image_proj_model_in_features = image_emb_dim
|
| 487 |
+
|
| 488 |
+
def set_ip_adapter(self, model_ckpt, num_tokens, scale):
|
| 489 |
+
unet = self.unet
|
| 490 |
+
        attn_procs = {}
        for name in unet.attn_processors.keys():
            cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
            if name.startswith("mid_block"):
                hidden_size = unet.config.block_out_channels[-1]
            elif name.startswith("up_blocks"):
                block_id = int(name[len("up_blocks.")])
                hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
            elif name.startswith("down_blocks"):
                block_id = int(name[len("down_blocks.")])
                hidden_size = unet.config.block_out_channels[block_id]
            if cross_attention_dim is None:
                attn_procs[name] = AttnProcessor().to(unet.device, dtype=unet.dtype)
            else:
                attn_procs[name] = IPAttnProcessor(
                    hidden_size=hidden_size,
                    cross_attention_dim=cross_attention_dim,
                    scale=scale,
                    num_tokens=num_tokens,
                ).to(unet.device, dtype=unet.dtype)
        unet.set_attn_processor(attn_procs)

        state_dict = torch.load(model_ckpt, map_location="cpu")
        ip_layers = torch.nn.ModuleList(self.unet.attn_processors.values())
        if "ip_adapter" in state_dict:
            state_dict = state_dict["ip_adapter"]
        ip_layers.load_state_dict(state_dict)

    def set_ip_adapter_scale(self, scale):
        unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
        for attn_processor in unet.attn_processors.values():
            if isinstance(attn_processor, IPAttnProcessor):
                attn_processor.scale = scale

    def _encode_prompt_image_emb(self, prompt_image_emb, device, dtype, do_classifier_free_guidance):
        if isinstance(prompt_image_emb, torch.Tensor):
            prompt_image_emb = prompt_image_emb.clone().detach()
        else:
            prompt_image_emb = torch.tensor(prompt_image_emb)

        prompt_image_emb = prompt_image_emb.to(device=device, dtype=dtype)
        prompt_image_emb = prompt_image_emb.reshape([1, -1, self.image_proj_model_in_features])

        if do_classifier_free_guidance:
            prompt_image_emb = torch.cat([torch.zeros_like(prompt_image_emb), prompt_image_emb], dim=0)
        else:
            prompt_image_emb = torch.cat([prompt_image_emb], dim=0)

        prompt_image_emb = self.image_proj_model(prompt_image_emb)
        return prompt_image_emb

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        image: PipelineImageInput = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        image_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
        guess_mode: bool = False,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,
        original_size: Tuple[int, int] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Tuple[int, int] = None,
        negative_original_size: Optional[Tuple[int, int]] = None,
        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
        negative_target_size: Optional[Tuple[int, int]] = None,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders.
            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
                `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
                specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
                accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
                and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
                `init`, images must be passed as a list such that each element of the list can be correctly batched for
                input to a single ControlNet.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image. Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image. Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
                and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, pooled text embeddings are generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
                weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
                argument.
            image_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated image embeddings.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
                The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
                the corresponding scale as a list.
            guess_mode (`bool`, *optional*, defaults to `False`):
                The ControlNet encoder tries to recognize the content of the input image even if you remove all
                prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
                The percentage of total steps at which the ControlNet starts applying.
            control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
                The percentage of total steps at which the ControlNet stops applying.
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image. If
                not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be as same
                as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned containing the output images.
        """

        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet

        # align format for control guidance
        if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
            control_guidance_start = len(control_guidance_end) * [control_guidance_start]
        elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
            control_guidance_end = len(control_guidance_start) * [control_guidance_end]
        elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
            mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
            control_guidance_start, control_guidance_end = (
                mult * [control_guidance_start],
                mult * [control_guidance_end],
            )

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            image,
            callback_steps,
            negative_prompt,
            negative_prompt_2,
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
            controlnet_conditioning_scale,
            control_guidance_start,
            control_guidance_end,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
            controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)

        global_pool_conditions = (
            controlnet.config.global_pool_conditions
            if isinstance(controlnet, ControlNetModel)
            else controlnet.nets[0].config.global_pool_conditions
        )
        guess_mode = guess_mode or global_pool_conditions

        # 3.1 Encode input prompt
        text_encoder_lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = self.encode_prompt(
            prompt,
            prompt_2,
            device,
            num_images_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            negative_prompt_2,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
            clip_skip=self.clip_skip,
        )

        # 3.2 Encode image prompt
        prompt_image_emb = self._encode_prompt_image_emb(
            image_embeds, device, self.unet.dtype, self.do_classifier_free_guidance
        )
        bs_embed, seq_len, _ = prompt_image_emb.shape
        prompt_image_emb = prompt_image_emb.repeat(1, num_images_per_prompt, 1)
        prompt_image_emb = prompt_image_emb.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # 4. Prepare image
        if isinstance(controlnet, ControlNetModel):
            image = self.prepare_image(
                image=image,
                width=width,
                height=height,
                batch_size=batch_size * num_images_per_prompt,
                num_images_per_prompt=num_images_per_prompt,
                device=device,
                dtype=controlnet.dtype,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                guess_mode=guess_mode,
            )
            height, width = image.shape[-2:]
        elif isinstance(controlnet, MultiControlNetModel):
            images = []

            for image_ in image:
                image_ = self.prepare_image(
                    image=image_,
                    width=width,
                    height=height,
                    batch_size=batch_size * num_images_per_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    device=device,
                    dtype=controlnet.dtype,
                    do_classifier_free_guidance=self.do_classifier_free_guidance,
                    guess_mode=guess_mode,
                )

                images.append(image_)

            image = images
            height, width = image[0].shape[-2:]
        else:
            assert False

        # 5. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        self._num_timesteps = len(timesteps)

        # 6. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6.5 Optionally get Guidance Scale Embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7.1 Create tensor stating which controlnets to keep
        controlnet_keep = []
        for i in range(len(timesteps)):
            keeps = [
                1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
                for s, e in zip(control_guidance_start, control_guidance_end)
            ]
            controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)

        # 7.2 Prepare added time ids & embeddings
        if isinstance(image, list):
            original_size = original_size or image[0].shape[-2:]
        else:
            original_size = original_size or image.shape[-2:]
        target_size = target_size or (height, width)

        add_text_embeds = pooled_prompt_embeds
        if self.text_encoder_2 is None:
            text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
        else:
            text_encoder_projection_dim = self.text_encoder_2.config.projection_dim

        add_time_ids = self._get_add_time_ids(
            original_size,
            crops_coords_top_left,
            target_size,
            dtype=prompt_embeds.dtype,
            text_encoder_projection_dim=text_encoder_projection_dim,
        )

        if negative_original_size is not None and negative_target_size is not None:
            negative_add_time_ids = self._get_add_time_ids(
                negative_original_size,
                negative_crops_coords_top_left,
                negative_target_size,
                dtype=prompt_embeds.dtype,
                text_encoder_projection_dim=text_encoder_projection_dim,
            )
        else:
            negative_add_time_ids = add_time_ids

        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
            add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)

        prompt_embeds = prompt_embeds.to(device)
        add_text_embeds = add_text_embeds.to(device)
        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
        encoder_hidden_states = torch.cat([prompt_embeds, prompt_image_emb], dim=1)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        is_unet_compiled = is_compiled_module(self.unet)
        is_controlnet_compiled = is_compiled_module(self.controlnet)
        is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Relevant thread:
                # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
                if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
                    torch._inductor.cudagraph_mark_step_begin()
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}

                # controlnet(s) inference
                if guess_mode and self.do_classifier_free_guidance:
                    # Infer ControlNet only for the conditional batch.
                    control_model_input = latents
                    control_model_input = self.scheduler.scale_model_input(control_model_input, t)
                    controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
                    controlnet_added_cond_kwargs = {
                        "text_embeds": add_text_embeds.chunk(2)[1],
                        "time_ids": add_time_ids.chunk(2)[1],
                    }
                else:
                    control_model_input = latent_model_input
                    controlnet_prompt_embeds = prompt_embeds
                    controlnet_added_cond_kwargs = added_cond_kwargs

                if isinstance(controlnet_keep[i], list):
                    cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
                else:
                    controlnet_cond_scale = controlnet_conditioning_scale
                    if isinstance(controlnet_cond_scale, list):
                        controlnet_cond_scale = controlnet_cond_scale[0]
                    cond_scale = controlnet_cond_scale * controlnet_keep[i]

                down_block_res_samples, mid_block_res_sample = self.controlnet(
                    control_model_input,
                    t,
                    encoder_hidden_states=prompt_image_emb,
                    controlnet_cond=image,
                    conditioning_scale=cond_scale,
                    guess_mode=guess_mode,
                    added_cond_kwargs=controlnet_added_cond_kwargs,
                    return_dict=False,
                )

                if guess_mode and self.do_classifier_free_guidance:
                    # Inferred ControlNet only for the conditional batch.
                    # To apply the output of ControlNet to both the unconditional and conditional batches,
                    # add 0 to the unconditional batch to keep it unchanged.
                    down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
                    mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=encoder_hidden_states,
                    timestep_cond=timestep_cond,
                    cross_attention_kwargs=self.cross_attention_kwargs,
                    down_block_additional_residuals=down_block_res_samples,
                    mid_block_additional_residual=mid_block_res_sample,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if not output_type == "latent":
            # make sure the VAE is in float32 mode, as it overflows in float16
            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
            if needs_upcasting:
                self.upcast_vae()
                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]

            # cast back to fp16 if needed
            if needs_upcasting:
                self.vae.to(dtype=torch.float16)
        else:
            image = latents

        if not output_type == "latent":
            # apply watermark if available
            if self.watermark is not None:
                image = self.watermark.apply_watermark(image)

            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)
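
A minimal usage sketch for the pipeline above. This is illustrative only and not part of the diff: the ControlNet checkpoint name, the `custom_pipeline` identifier, and the `face_kps_image` / `face_embedding` inputs are placeholder assumptions; in practice the identity embedding comes from an external face-analysis model and the keypoint image is the ControlNet condition.

    import torch
    from diffusers import ControlNetModel, DiffusionPipeline

    # hypothetical identity ControlNet checkpoint (placeholder name)
    controlnet = ControlNetModel.from_pretrained("your/identity-controlnet", torch_dtype=torch.float16)

    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        controlnet=controlnet,
        custom_pipeline="pipeline_stable_diffusion_xl_instantid",  # assumed community-pipeline name
        torch_dtype=torch.float16,
    ).to("cuda")

    # face_kps_image: keypoint image used as the ControlNet condition (placeholder);
    # face_embedding: identity features from an external face model, passed as `image_embeds` (placeholder).
    output = pipe(
        prompt="analog film photo of a person in a garden",
        image=face_kps_image,
        image_embeds=face_embedding,
        controlnet_conditioning_scale=0.8,
        num_inference_steps=30,
        guidance_scale=5.0,
    )
    output.images[0].save("out.png")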
v0.27.0/pipeline_stable_diffusion_xl_ipex.py
ADDED
@@ -0,0 +1,1429 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import intel_extension_for_pytorch as ipex
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from diffusers import StableDiffusionXLPipeline
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    USE_PEFT_BACKEND,
    deprecate,
    is_invisible_watermark_available,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionXLPipelineIpex

        >>> # SDXL-Turbo, a distilled version of SDXL 1.0, trained for real-time synthesis
        >>> pipe = StableDiffusionXLPipelineIpex.from_pretrained(
        ...     "stabilityai/sdxl-turbo", low_cpu_mem_usage=True, use_safetensors=True
        ... )

        >>> num_inference_steps = 1
        >>> guidance_scale = 0.0
        >>> use_bf16 = True
        >>> data_type = torch.bfloat16 if use_bf16 else torch.float32
        >>> prompt = "a photo of an astronaut riding a horse on mars"

        >>> # value of image height/width should be consistent with the pipeline inference
        >>> # For Float32
        >>> pipe.prepare_for_ipex(torch.float32, prompt, height=512, width=512)
        >>> # For BFloat16
        >>> pipe.prepare_for_ipex(torch.bfloat16, prompt, height=512, width=512)

        >>> # value of image height/width should be consistent with 'prepare_for_ipex()'
        >>> # For Float32
        >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512, guidance_scale=guidance_scale).images[0]
        >>> # For BFloat16
        >>> with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
        >>>     image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512, guidance_scale=guidance_scale).images[0]
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg
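

# Illustrative sketch, not part of the upstream file: shows what `rescale_noise_cfg` does to a
# classifier-free-guidance prediction. The tensor shapes and the guidance scale of 7.5 are assumptions.
def _rescale_noise_cfg_example():
    noise_pred_text = torch.randn(1, 4, 64, 64)
    noise_pred_uncond = torch.randn(1, 4, 64, 64)
    # standard CFG combination; its std is typically larger than that of the text-conditioned branch
    noise_cfg = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
    # guidance_rescale=0.7 pulls the std of the combined prediction back toward the text branch
    rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
    return rescaled.std(), noise_cfg.std()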


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used,
            `timesteps` must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
            must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
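

# Illustrative sketch, not part of the upstream file: how `retrieve_timesteps` is typically used.
# The scheduler class chosen here is an arbitrary assumption.
def _retrieve_timesteps_example():
    from diffusers import EulerDiscreteScheduler

    scheduler = EulerDiscreteScheduler()
    # ask for a step count; alternatively an explicit `timesteps` schedule can be passed
    # when the scheduler's `set_timesteps` accepts one
    timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=4, device="cpu")
    return timesteps, num_inference_steps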
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class StableDiffusionXLPipelineIpex(
|
| 164 |
+
StableDiffusionXLPipeline,
|
| 165 |
+
):
|
| 166 |
+
r"""
|
| 167 |
+
Pipeline for text-to-image generation using Stable Diffusion XL on IPEX.
|
| 168 |
+
|
| 169 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 170 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 171 |
+
|
| 172 |
+
In addition the pipeline inherits the following loading methods:
|
| 173 |
+
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
|
| 174 |
+
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
|
| 175 |
+
|
| 176 |
+
as well as the following saving methods:
|
| 177 |
+
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
|
| 178 |
+
|
| 179 |
+
Args:
|
| 180 |
+
vae ([`AutoencoderKL`]):
|
| 181 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 182 |
+
text_encoder ([`CLIPTextModel`]):
|
| 183 |
+
Frozen text-encoder. Stable Diffusion XL uses the text portion of
|
| 184 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 185 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 186 |
+
text_encoder_2 ([` CLIPTextModelWithProjection`]):
|
| 187 |
+
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
|
| 188 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
|
| 189 |
+
specifically the
|
| 190 |
+
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
|
| 191 |
+
variant.
|
| 192 |
+
tokenizer (`CLIPTokenizer`):
|
| 193 |
+
Tokenizer of class
|
| 194 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 195 |
+
tokenizer_2 (`CLIPTokenizer`):
|
| 196 |
+
Second Tokenizer of class
|
| 197 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 198 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 199 |
+
scheduler ([`SchedulerMixin`]):
|
| 200 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 201 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 202 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
| 203 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
| 204 |
+
`stabilityai/stable-diffusion-xl-base-1-0`.
|
| 205 |
+
add_watermarker (`bool`, *optional*):
|
| 206 |
+
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
|
| 207 |
+
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
|
| 208 |
+
watermarker will be used.
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
|
| 212 |
+
_optional_components = [
|
| 213 |
+
"tokenizer",
|
| 214 |
+
"tokenizer_2",
|
| 215 |
+
"text_encoder",
|
| 216 |
+
"text_encoder_2",
|
| 217 |
+
"image_encoder",
|
| 218 |
+
"feature_extractor",
|
| 219 |
+
]
|
| 220 |
+
_callback_tensor_inputs = [
|
| 221 |
+
"latents",
|
| 222 |
+
"prompt_embeds",
|
| 223 |
+
"negative_prompt_embeds",
|
| 224 |
+
"add_text_embeds",
|
| 225 |
+
"add_time_ids",
|
| 226 |
+
"negative_pooled_prompt_embeds",
|
| 227 |
+
"negative_add_time_ids",
|
| 228 |
+
]
|
| 229 |
+
|
| 230 |
+
def __init__(
|
| 231 |
+
self,
|
| 232 |
+
vae: AutoencoderKL,
|
| 233 |
+
text_encoder: CLIPTextModel,
|
| 234 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
| 235 |
+
tokenizer: CLIPTokenizer,
|
| 236 |
+
tokenizer_2: CLIPTokenizer,
|
| 237 |
+
unet: UNet2DConditionModel,
|
| 238 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 239 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 240 |
+
feature_extractor: CLIPImageProcessor = None,
|
| 241 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 242 |
+
add_watermarker: Optional[bool] = None,
|
| 243 |
+
):
|
| 244 |
+
# super().__init__()
|
| 245 |
+
|
| 246 |
+
self.register_modules(
|
| 247 |
+
vae=vae,
|
| 248 |
+
text_encoder=text_encoder,
|
| 249 |
+
text_encoder_2=text_encoder_2,
|
| 250 |
+
tokenizer=tokenizer,
|
| 251 |
+
tokenizer_2=tokenizer_2,
|
| 252 |
+
unet=unet,
|
| 253 |
+
scheduler=scheduler,
|
| 254 |
+
image_encoder=image_encoder,
|
| 255 |
+
feature_extractor=feature_extractor,
|
| 256 |
+
)
|
| 257 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 258 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 259 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 260 |
+
|
| 261 |
+
self.default_sample_size = self.unet.config.sample_size
|
| 262 |
+
|
| 263 |
+
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
| 264 |
+
|
| 265 |
+
if add_watermarker:
|
| 266 |
+
self.watermark = StableDiffusionXLWatermarker()
|
| 267 |
+
else:
|
| 268 |
+
self.watermark = None
|
| 269 |
+
|
| 270 |
+
def encode_prompt(
|
| 271 |
+
self,
|
| 272 |
+
prompt: str,
|
| 273 |
+
prompt_2: Optional[str] = None,
|
| 274 |
+
device: Optional[torch.device] = None,
|
| 275 |
+
num_images_per_prompt: int = 1,
|
| 276 |
+
do_classifier_free_guidance: bool = True,
|
| 277 |
+
negative_prompt: Optional[str] = None,
|
| 278 |
+
negative_prompt_2: Optional[str] = None,
|
| 279 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 280 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 281 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 282 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 283 |
+
lora_scale: Optional[float] = None,
|
| 284 |
+
clip_skip: Optional[int] = None,
|
| 285 |
+
):
|
| 286 |
+
r"""
|
| 287 |
+
Encodes the prompt into text encoder hidden states.
|
| 288 |
+
|
| 289 |
+
Args:
|
| 290 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 291 |
+
prompt to be encoded
|
| 292 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 293 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 294 |
+
used in both text-encoders
|
| 295 |
+
device: (`torch.device`):
|
| 296 |
+
torch device
|
| 297 |
+
num_images_per_prompt (`int`):
|
| 298 |
+
number of images that should be generated per prompt
|
| 299 |
+
do_classifier_free_guidance (`bool`):
|
| 300 |
+
whether to use classifier free guidance or not
|
| 301 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 302 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 303 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 304 |
+
less than `1`).
|
| 305 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 306 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 307 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 308 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 309 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 310 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 311 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 312 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 313 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 314 |
+
argument.
|
| 315 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 316 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 317 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 318 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 319 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 320 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 321 |
+
input argument.
|
| 322 |
+
lora_scale (`float`, *optional*):
|
| 323 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 324 |
+
clip_skip (`int`, *optional*):
|
| 325 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 326 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 327 |
+
"""
|
| 328 |
+
device = device or self._execution_device
|
| 329 |
+
|
| 330 |
+
# set lora scale so that monkey patched LoRA
|
| 331 |
+
# function of text encoder can correctly access it
|
| 332 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
| 333 |
+
self._lora_scale = lora_scale
|
| 334 |
+
|
| 335 |
+
# dynamically adjust the LoRA scale
|
| 336 |
+
if self.text_encoder is not None:
|
| 337 |
+
if not USE_PEFT_BACKEND:
|
| 338 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 339 |
+
else:
|
| 340 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 341 |
+
|
| 342 |
+
if self.text_encoder_2 is not None:
|
| 343 |
+
if not USE_PEFT_BACKEND:
|
| 344 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 345 |
+
else:
|
| 346 |
+
scale_lora_layers(self.text_encoder_2, lora_scale)
|
| 347 |
+
|
| 348 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 349 |
+
|
| 350 |
+
if prompt is not None:
|
| 351 |
+
batch_size = len(prompt)
|
| 352 |
+
else:
|
| 353 |
+
batch_size = prompt_embeds.shape[0]
|
| 354 |
+
|
| 355 |
+
# Define tokenizers and text encoders
|
| 356 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 357 |
+
text_encoders = (
|
| 358 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
if prompt_embeds is None:
|
| 362 |
+
prompt_2 = prompt_2 or prompt
|
| 363 |
+
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
|
| 364 |
+
|
| 365 |
+
# textual inversion: procecss multi-vector tokens if necessary
|
| 366 |
+
prompt_embeds_list = []
|
| 367 |
+
prompts = [prompt, prompt_2]
|
| 368 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 369 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 370 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 371 |
+
|
| 372 |
+
text_inputs = tokenizer(
|
| 373 |
+
prompt,
|
| 374 |
+
padding="max_length",
|
| 375 |
+
max_length=tokenizer.model_max_length,
|
| 376 |
+
truncation=True,
|
| 377 |
+
return_tensors="pt",
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
text_input_ids = text_inputs.input_ids
|
| 381 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 382 |
+
|
| 383 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 384 |
+
text_input_ids, untruncated_ids
|
| 385 |
+
):
|
| 386 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 387 |
+
logger.warning(
|
| 388 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 389 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
|
| 393 |
+
|
| 394 |
+
# We are always interested only in the pooled output of the final text encoder
|
| 395 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 396 |
+
if clip_skip is None:
|
| 397 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 398 |
+
else:
|
| 399 |
+
# "2" because SDXL always indexes from the penultimate layer.
|
| 400 |
+
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
|
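                # Illustrative note (added): hidden_states[-1] is the last encoder layer, so with the SDXL
                # convention of starting from the penultimate layer, clip_skip=None maps to hidden_states[-2]
                # and clip_skip=1 maps to hidden_states[-3], i.e. one layer further from the output.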
| 401 |
+
|
| 402 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 403 |
+
|
| 404 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 405 |
+
|
| 406 |
+
# get unconditional embeddings for classifier free guidance
|
| 407 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 408 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 409 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 410 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 411 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 412 |
+
negative_prompt = negative_prompt or ""
|
| 413 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 414 |
+
|
| 415 |
+
# normalize str to list
|
| 416 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 417 |
+
negative_prompt_2 = (
|
| 418 |
+
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
|
| 419 |
+
)
|
| 420 |
+
|
| 421 |
+
uncond_tokens: List[str]
|
| 422 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 423 |
+
raise TypeError(
|
| 424 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 425 |
+
f" {type(prompt)}."
|
| 426 |
+
)
|
| 427 |
+
elif batch_size != len(negative_prompt):
|
| 428 |
+
raise ValueError(
|
| 429 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 430 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 431 |
+
" the batch size of `prompt`."
|
| 432 |
+
)
|
| 433 |
+
else:
|
| 434 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 435 |
+
|
| 436 |
+
negative_prompt_embeds_list = []
|
| 437 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 438 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 439 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 440 |
+
|
| 441 |
+
max_length = prompt_embeds.shape[1]
|
| 442 |
+
uncond_input = tokenizer(
|
| 443 |
+
negative_prompt,
|
| 444 |
+
padding="max_length",
|
| 445 |
+
max_length=max_length,
|
| 446 |
+
truncation=True,
|
| 447 |
+
return_tensors="pt",
|
| 448 |
+
)
|
| 449 |
+
|
| 450 |
+
negative_prompt_embeds = text_encoder(
|
| 451 |
+
uncond_input.input_ids.to(device),
|
| 452 |
+
output_hidden_states=True,
|
| 453 |
+
)
|
| 454 |
+
# We are always interested only in the pooled output of the final text encoder
|
| 455 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 456 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 457 |
+
|
| 458 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 459 |
+
|
| 460 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 461 |
+
|
| 462 |
+
if self.text_encoder_2 is not None:
|
| 463 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 464 |
+
else:
|
| 465 |
+
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 466 |
+
|
| 467 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 468 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 469 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 470 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 471 |
+
|
| 472 |
+
if do_classifier_free_guidance:
|
| 473 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 474 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 475 |
+
|
| 476 |
+
if self.text_encoder_2 is not None:
|
| 477 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 478 |
+
else:
|
| 479 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 480 |
+
|
| 481 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 482 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 483 |
+
|
| 484 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 485 |
+
bs_embed * num_images_per_prompt, -1
|
| 486 |
+
)
|
| 487 |
+
if do_classifier_free_guidance:
|
| 488 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 489 |
+
bs_embed * num_images_per_prompt, -1
|
| 490 |
+
)
|
| 491 |
+
|
| 492 |
+
if self.text_encoder is not None:
|
| 493 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 494 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 495 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 496 |
+
|
| 497 |
+
if self.text_encoder_2 is not None:
|
| 498 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 499 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 500 |
+
unscale_lora_layers(self.text_encoder_2, lora_scale)
|
| 501 |
+
|
| 502 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 503 |
+
|
| 504 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 505 |
+
def encode_image(self, image, device, num_images_per_prompt):
|
| 506 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 507 |
+
|
| 508 |
+
if not isinstance(image, torch.Tensor):
|
| 509 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 510 |
+
|
| 511 |
+
image = image.to(device=device, dtype=dtype)
|
| 512 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 513 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 514 |
+
|
| 515 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 516 |
+
return image_embeds, uncond_image_embeds
|
| 517 |
+
|
| 518 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 519 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 520 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 521 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 522 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 523 |
+
# and should be between [0, 1]
|
| 524 |
+
|
| 525 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 526 |
+
extra_step_kwargs = {}
|
| 527 |
+
if accepts_eta:
|
| 528 |
+
extra_step_kwargs["eta"] = eta
|
| 529 |
+
|
| 530 |
+
# check if the scheduler accepts generator
|
| 531 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 532 |
+
if accepts_generator:
|
| 533 |
+
extra_step_kwargs["generator"] = generator
|
| 534 |
+
return extra_step_kwargs
|
| 535 |
+
|
| 536 |
+
def check_inputs(
|
| 537 |
+
self,
|
| 538 |
+
prompt,
|
| 539 |
+
prompt_2,
|
| 540 |
+
height,
|
| 541 |
+
width,
|
| 542 |
+
callback_steps,
|
| 543 |
+
negative_prompt=None,
|
| 544 |
+
negative_prompt_2=None,
|
| 545 |
+
prompt_embeds=None,
|
| 546 |
+
negative_prompt_embeds=None,
|
| 547 |
+
pooled_prompt_embeds=None,
|
| 548 |
+
negative_pooled_prompt_embeds=None,
|
| 549 |
+
callback_on_step_end_tensor_inputs=None,
|
| 550 |
+
):
|
| 551 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 552 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 553 |
+
|
| 554 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 555 |
+
raise ValueError(
|
| 556 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 557 |
+
f" {type(callback_steps)}."
|
| 558 |
+
)
|
| 559 |
+
|
| 560 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 561 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 562 |
+
):
|
| 563 |
+
raise ValueError(
|
| 564 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 565 |
+
)
|
| 566 |
+
|
| 567 |
+
if prompt is not None and prompt_embeds is not None:
|
| 568 |
+
raise ValueError(
|
| 569 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 570 |
+
" only forward one of the two."
|
| 571 |
+
)
|
| 572 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
| 573 |
+
raise ValueError(
|
| 574 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 575 |
+
" only forward one of the two."
|
| 576 |
+
)
|
| 577 |
+
elif prompt is None and prompt_embeds is None:
|
| 578 |
+
raise ValueError(
|
| 579 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 580 |
+
)
|
| 581 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 582 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 583 |
+
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
| 584 |
+
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
| 585 |
+
|
| 586 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 587 |
+
raise ValueError(
|
| 588 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 589 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 590 |
+
)
|
| 591 |
+
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
| 592 |
+
raise ValueError(
|
| 593 |
+
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
| 594 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 595 |
+
)
|
| 596 |
+
|
| 597 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 598 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 599 |
+
raise ValueError(
|
| 600 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 601 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 602 |
+
f" {negative_prompt_embeds.shape}."
|
| 603 |
+
)
|
| 604 |
+
|
| 605 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 606 |
+
raise ValueError(
|
| 607 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 608 |
+
)
|
| 609 |
+
|
| 610 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 611 |
+
raise ValueError(
|
| 612 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 613 |
+
)
|
| 614 |
+
|
| 615 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 616 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 617 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 618 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 619 |
+
raise ValueError(
|
| 620 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 621 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 622 |
+
)
|
| 623 |
+
|
| 624 |
+
if latents is None:
|
| 625 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=torch.float32)
|
| 626 |
+
else:
|
| 627 |
+
latents = latents.to(device)
|
| 628 |
+
|
| 629 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 630 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 631 |
+
return latents
|
| 632 |
+
|
| 633 |
+
def _get_add_time_ids(
|
| 634 |
+
self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
|
| 635 |
+
):
|
| 636 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 637 |
+
|
| 638 |
+
passed_add_embed_dim = (
|
| 639 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
|
| 640 |
+
)
|
| 641 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 642 |
+
|
| 643 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
| 644 |
+
raise ValueError(
|
| 645 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 646 |
+
)
|
| 647 |
+
|
| 648 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 649 |
+
return add_time_ids
|
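    # Illustrative note (added; figures assume the default SDXL base configuration): the six integers from
    # original_size + crops_coords_top_left + target_size, e.g. (1024, 1024, 0, 0, 1024, 1024), are each
    # Fourier-embedded to `addition_time_embed_dim` (256) features, so 6 * 256 + projection_dim (1280) = 2816,
    # which must equal `unet.add_embedding.linear_1.in_features` for the check above to pass.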
| 650 |
+
|
| 651 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
| 652 |
+
def upcast_vae(self):
|
| 653 |
+
dtype = self.vae.dtype
|
| 654 |
+
self.vae.to(dtype=torch.float32)
|
| 655 |
+
use_torch_2_0_or_xformers = isinstance(
|
| 656 |
+
self.vae.decoder.mid_block.attentions[0].processor,
|
| 657 |
+
(
|
| 658 |
+
AttnProcessor2_0,
|
| 659 |
+
XFormersAttnProcessor,
|
| 660 |
+
LoRAXFormersAttnProcessor,
|
| 661 |
+
LoRAAttnProcessor2_0,
|
| 662 |
+
),
|
| 663 |
+
)
|
| 664 |
+
# if xformers or torch_2_0 is used attention block does not need
|
| 665 |
+
# to be in float32 which can save lots of memory
|
| 666 |
+
if use_torch_2_0_or_xformers:
|
| 667 |
+
self.vae.post_quant_conv.to(dtype)
|
| 668 |
+
self.vae.decoder.conv_in.to(dtype)
|
| 669 |
+
self.vae.decoder.mid_block.to(dtype)
|
| 670 |
+
|
| 671 |
+
    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            timesteps (`torch.Tensor`):
                generate embedding vectors at these timesteps
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb
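    # Minimal usage sketch (added for illustration): for a guidance-distilled UNet that exposes
    # `time_cond_proj_dim`, `w` is typically the guidance weight minus one, repeated per sample, e.g.
    #   w = torch.tensor([guidance_scale - 1.0] * batch_size)
    #   timestep_cond = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.time_cond_proj_dim)
    # which yields a tensor of shape (batch_size, time_cond_proj_dim), as done in `__call__` below.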
| 699 |
+
|
| 700 |
+
@property
|
| 701 |
+
def guidance_scale(self):
|
| 702 |
+
return self._guidance_scale
|
| 703 |
+
|
| 704 |
+
@property
|
| 705 |
+
def guidance_rescale(self):
|
| 706 |
+
return self._guidance_rescale
|
| 707 |
+
|
| 708 |
+
@property
|
| 709 |
+
def clip_skip(self):
|
| 710 |
+
return self._clip_skip
|
| 711 |
+
|
| 712 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 713 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 714 |
+
# corresponds to doing no classifier free guidance.
|
| 715 |
+
@property
|
| 716 |
+
def do_classifier_free_guidance(self):
|
| 717 |
+
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
|
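    # Note (added): when the UNet is guidance-distilled (`time_cond_proj_dim` is set, as in LCM-style models),
    # classifier-free guidance is disabled here and the guidance weight is instead injected through
    # `get_guidance_scale_embedding` as `timestep_cond`.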
| 718 |
+
|
| 719 |
+
@property
|
| 720 |
+
def cross_attention_kwargs(self):
|
| 721 |
+
return self._cross_attention_kwargs
|
| 722 |
+
|
| 723 |
+
@property
|
| 724 |
+
def denoising_end(self):
|
| 725 |
+
return self._denoising_end
|
| 726 |
+
|
| 727 |
+
@property
|
| 728 |
+
def num_timesteps(self):
|
| 729 |
+
return self._num_timesteps
|
| 730 |
+
|
| 731 |
+
@torch.no_grad()
|
| 732 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 733 |
+
def __call__(
|
| 734 |
+
self,
|
| 735 |
+
prompt: Union[str, List[str]] = None,
|
| 736 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 737 |
+
height: Optional[int] = None,
|
| 738 |
+
width: Optional[int] = None,
|
| 739 |
+
num_inference_steps: int = 50,
|
| 740 |
+
timesteps: List[int] = None,
|
| 741 |
+
denoising_end: Optional[float] = None,
|
| 742 |
+
guidance_scale: float = 5.0,
|
| 743 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 744 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 745 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 746 |
+
eta: float = 0.0,
|
| 747 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 748 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 749 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 750 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 751 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 752 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 753 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 754 |
+
output_type: Optional[str] = "pil",
|
| 755 |
+
return_dict: bool = True,
|
| 756 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 757 |
+
guidance_rescale: float = 0.0,
|
| 758 |
+
original_size: Optional[Tuple[int, int]] = None,
|
| 759 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 760 |
+
target_size: Optional[Tuple[int, int]] = None,
|
| 761 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 762 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 763 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 764 |
+
clip_skip: Optional[int] = None,
|
| 765 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 766 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 767 |
+
**kwargs,
|
| 768 |
+
):
|
| 769 |
+
r"""
|
| 770 |
+
Function invoked when calling the pipeline for generation.
|
| 771 |
+
|
| 772 |
+
Args:
|
| 773 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 774 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
| 775 |
+
instead.
|
| 776 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 777 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 778 |
+
used in both text-encoders
|
| 779 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 780 |
+
The height in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 781 |
+
Anything below 512 pixels won't work well for
|
| 782 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 783 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 784 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 785 |
+
The width in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 786 |
+
Anything below 512 pixels won't work well for
|
| 787 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 788 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 789 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 790 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 791 |
+
expense of slower inference.
|
| 792 |
+
timesteps (`List[int]`, *optional*):
|
| 793 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 794 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 795 |
+
passed will be used. Must be in descending order.
|
| 796 |
+
denoising_end (`float`, *optional*):
|
| 797 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 798 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 799 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
| 800 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
| 801 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 802 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
|
| 803 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 804 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 805 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 806 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 807 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 808 |
+
usually at the expense of lower image quality.
|
| 809 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 810 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 811 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 812 |
+
less than `1`).
|
| 813 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 814 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 815 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 816 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 817 |
+
The number of images to generate per prompt.
|
| 818 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 819 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 820 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 821 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 822 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 823 |
+
to make generation deterministic.
|
| 824 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 825 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 826 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 827 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 828 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 829 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 830 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 831 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 832 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 833 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 834 |
+
argument.
|
| 835 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 836 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 837 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 838 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 839 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 840 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 841 |
+
input argument.
|
| 842 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
|
| 843 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 844 |
+
The output format of the generated image. Choose between
|
| 845 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 846 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 847 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
| 848 |
+
of a plain tuple.
|
| 849 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 850 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 851 |
+
`self.processor` in
|
| 852 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 853 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 854 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 855 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
|
| 856 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
| 857 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 858 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 859 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 860 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 861 |
+
explained in section 2.2 of
|
| 862 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 863 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 864 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 865 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 866 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 867 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 868 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 869 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 870 |
+
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 871 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 872 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 873 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 874 |
+
micro-conditioning as explained in section 2.2 of
|
| 875 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 876 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 877 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 878 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 879 |
+
micro-conditioning as explained in section 2.2 of
|
| 880 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 881 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 882 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 883 |
+
To negatively condition the generation process based on a target image resolution. It should be as same
|
| 884 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 885 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 886 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 887 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 888 |
+
A function that calls at the end of each denoising steps during the inference. The function is called
|
| 889 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 890 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 891 |
+
`callback_on_step_end_tensor_inputs`.
|
| 892 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 893 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 894 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 895 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 896 |
+
|
| 897 |
+
Examples:
|
| 898 |
+
|
| 899 |
+
Returns:
|
| 900 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
|
| 901 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
|
| 902 |
+
`tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 903 |
+
"""
|
| 904 |
+
|
| 905 |
+
callback = kwargs.pop("callback", None)
|
| 906 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 907 |
+
|
| 908 |
+
if callback is not None:
|
| 909 |
+
deprecate(
|
| 910 |
+
"callback",
|
| 911 |
+
"1.0.0",
|
| 912 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 913 |
+
)
|
| 914 |
+
if callback_steps is not None:
|
| 915 |
+
deprecate(
|
| 916 |
+
"callback_steps",
|
| 917 |
+
"1.0.0",
|
| 918 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 919 |
+
)
|
| 920 |
+
|
| 921 |
+
# 0. Default height and width to unet
|
| 922 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 923 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 924 |
+
|
| 925 |
+
original_size = original_size or (height, width)
|
| 926 |
+
target_size = target_size or (height, width)
|
| 927 |
+
|
| 928 |
+
# 1. Check inputs. Raise error if not correct
|
| 929 |
+
self.check_inputs(
|
| 930 |
+
prompt,
|
| 931 |
+
prompt_2,
|
| 932 |
+
height,
|
| 933 |
+
width,
|
| 934 |
+
callback_steps,
|
| 935 |
+
negative_prompt,
|
| 936 |
+
negative_prompt_2,
|
| 937 |
+
prompt_embeds,
|
| 938 |
+
negative_prompt_embeds,
|
| 939 |
+
pooled_prompt_embeds,
|
| 940 |
+
negative_pooled_prompt_embeds,
|
| 941 |
+
callback_on_step_end_tensor_inputs,
|
| 942 |
+
)
|
| 943 |
+
|
| 944 |
+
self._guidance_scale = guidance_scale
|
| 945 |
+
self._guidance_rescale = guidance_rescale
|
| 946 |
+
self._clip_skip = clip_skip
|
| 947 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 948 |
+
self._denoising_end = denoising_end
|
| 949 |
+
|
| 950 |
+
# 2. Define call parameters
|
| 951 |
+
if prompt is not None and isinstance(prompt, str):
|
| 952 |
+
batch_size = 1
|
| 953 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 954 |
+
batch_size = len(prompt)
|
| 955 |
+
else:
|
| 956 |
+
batch_size = prompt_embeds.shape[0]
|
| 957 |
+
|
| 958 |
+
device = self._execution_device
|
| 959 |
+
|
| 960 |
+
# 3. Encode input prompt
|
| 961 |
+
lora_scale = (
|
| 962 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 963 |
+
)
|
| 964 |
+
|
| 965 |
+
(
|
| 966 |
+
prompt_embeds,
|
| 967 |
+
negative_prompt_embeds,
|
| 968 |
+
pooled_prompt_embeds,
|
| 969 |
+
negative_pooled_prompt_embeds,
|
| 970 |
+
) = self.encode_prompt(
|
| 971 |
+
prompt=prompt,
|
| 972 |
+
prompt_2=prompt_2,
|
| 973 |
+
device=device,
|
| 974 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 975 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 976 |
+
negative_prompt=negative_prompt,
|
| 977 |
+
negative_prompt_2=negative_prompt_2,
|
| 978 |
+
prompt_embeds=prompt_embeds,
|
| 979 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 980 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 981 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 982 |
+
lora_scale=lora_scale,
|
| 983 |
+
clip_skip=self.clip_skip,
|
| 984 |
+
)
|
| 985 |
+
|
| 986 |
+
# 4. Prepare timesteps
|
| 987 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 988 |
+
|
| 989 |
+
# 5. Prepare latent variables
|
| 990 |
+
num_channels_latents = self.unet.config.in_channels
|
| 991 |
+
latents = self.prepare_latents(
|
| 992 |
+
batch_size * num_images_per_prompt,
|
| 993 |
+
num_channels_latents,
|
| 994 |
+
height,
|
| 995 |
+
width,
|
| 996 |
+
prompt_embeds.dtype,
|
| 997 |
+
device,
|
| 998 |
+
generator,
|
| 999 |
+
latents,
|
| 1000 |
+
)
|
| 1001 |
+
|
| 1002 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1003 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1004 |
+
|
| 1005 |
+
# 7. Prepare added time ids & embeddings
|
| 1006 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1007 |
+
if self.text_encoder_2 is None:
|
| 1008 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1009 |
+
else:
|
| 1010 |
+
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
|
| 1011 |
+
|
| 1012 |
+
add_time_ids = self._get_add_time_ids(
|
| 1013 |
+
original_size,
|
| 1014 |
+
crops_coords_top_left,
|
| 1015 |
+
target_size,
|
| 1016 |
+
dtype=prompt_embeds.dtype,
|
| 1017 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1018 |
+
)
|
| 1019 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 1020 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 1021 |
+
negative_original_size,
|
| 1022 |
+
negative_crops_coords_top_left,
|
| 1023 |
+
negative_target_size,
|
| 1024 |
+
dtype=prompt_embeds.dtype,
|
| 1025 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1026 |
+
)
|
| 1027 |
+
else:
|
| 1028 |
+
negative_add_time_ids = add_time_ids
|
| 1029 |
+
|
| 1030 |
+
if self.do_classifier_free_guidance:
|
| 1031 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1032 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1033 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 1034 |
+
|
| 1035 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1036 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1037 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1038 |
+
|
| 1039 |
+
if ip_adapter_image is not None:
|
| 1040 |
+
image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
|
| 1041 |
+
if self.do_classifier_free_guidance:
|
| 1042 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 1043 |
+
image_embeds = image_embeds.to(device)
|
| 1044 |
+
|
| 1045 |
+
# 8. Denoising loop
|
| 1046 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1047 |
+
|
| 1048 |
+
# 8.1 Apply denoising_end
|
| 1049 |
+
if (
|
| 1050 |
+
self.denoising_end is not None
|
| 1051 |
+
and isinstance(self.denoising_end, float)
|
| 1052 |
+
and self.denoising_end > 0
|
| 1053 |
+
and self.denoising_end < 1
|
| 1054 |
+
):
|
| 1055 |
+
discrete_timestep_cutoff = int(
|
| 1056 |
+
round(
|
| 1057 |
+
self.scheduler.config.num_train_timesteps
|
| 1058 |
+
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
|
| 1059 |
+
)
|
| 1060 |
+
)
|
| 1061 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 1062 |
+
timesteps = timesteps[:num_inference_steps]
|
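            # Worked example (added for illustration): with num_train_timesteps=1000 and denoising_end=0.8,
            # discrete_timestep_cutoff = round(1000 - 0.8 * 1000) = 200, so only timesteps >= 200 (roughly the
            # first 80% of the denoising trajectory) are run here; a refiner pipeline would typically resume
            # from denoising_start=0.8.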
| 1063 |
+
|
| 1064 |
+
# 9. Optionally get Guidance Scale Embedding
|
| 1065 |
+
timestep_cond = None
|
| 1066 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 1067 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 1068 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 1069 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 1070 |
+
).to(device=device, dtype=latents.dtype)
|
| 1071 |
+
|
| 1072 |
+
self._num_timesteps = len(timesteps)
|
| 1073 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1074 |
+
for i, t in enumerate(timesteps):
|
| 1075 |
+
# expand the latents if we are doing classifier free guidance
|
| 1076 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1077 |
+
|
| 1078 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1079 |
+
|
| 1080 |
+
# predict the noise residual
|
| 1081 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 1082 |
+
if ip_adapter_image is not None:
|
| 1083 |
+
added_cond_kwargs["image_embeds"] = image_embeds
|
| 1084 |
+
|
| 1085 |
+
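                # Note (added for clarity; an inference about the traced model): after `prepare_for_ipex`
                # replaces `self.unet.forward` with a `torch.jit.trace`d graph, only the inputs captured at
                # trace time (sample, timestep, encoder_hidden_states, added_cond_kwargs) are accepted, which
                # is presumably why the full-featured call below is kept commented out and a reduced call is
                # used instead.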
                # noise_pred = self.unet(
                #     latent_model_input,
                #     t,
                #     encoder_hidden_states=prompt_embeds,
                #     timestep_cond=timestep_cond,
                #     cross_attention_kwargs=self.cross_attention_kwargs,
                #     added_cond_kwargs=added_cond_kwargs,
                #     return_dict=False,
                # )[0]

                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    added_cond_kwargs=added_cond_kwargs,
                )["sample"]
| 1101 |
+
|
| 1102 |
+
                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
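                # For reference (comment added, not in the original): `rescale_noise_cfg` roughly rescales the
                # guided prediction by std(noise_pred_text) / std(noise_pred) and blends it back as
                #   guidance_rescale * rescaled + (1 - guidance_rescale) * noise_pred,
                # which counteracts the over-saturation plain CFG causes with zero-terminal-SNR schedules.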
| 1110 |
+
|
| 1111 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1112 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1113 |
+
|
| 1114 |
+
if callback_on_step_end is not None:
|
| 1115 |
+
callback_kwargs = {}
|
| 1116 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1117 |
+
callback_kwargs[k] = locals()[k]
|
| 1118 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1119 |
+
|
| 1120 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1121 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1122 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1123 |
+
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
|
| 1124 |
+
negative_pooled_prompt_embeds = callback_outputs.pop(
|
| 1125 |
+
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
|
| 1126 |
+
)
|
| 1127 |
+
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
|
| 1128 |
+
negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
|
| 1129 |
+
|
| 1130 |
+
# call the callback, if provided
|
| 1131 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1132 |
+
progress_bar.update()
|
| 1133 |
+
if callback is not None and i % callback_steps == 0:
|
| 1134 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1135 |
+
callback(step_idx, t, latents)
|
| 1136 |
+
|
| 1137 |
+
if XLA_AVAILABLE:
|
| 1138 |
+
xm.mark_step()
|
| 1139 |
+
|
| 1140 |
+
if not output_type == "latent":
|
| 1141 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1142 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1143 |
+
|
| 1144 |
+
if needs_upcasting:
|
| 1145 |
+
self.upcast_vae()
|
| 1146 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1147 |
+
|
| 1148 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1149 |
+
|
| 1150 |
+
# cast back to fp16 if needed
|
| 1151 |
+
if needs_upcasting:
|
| 1152 |
+
self.vae.to(dtype=torch.float16)
|
| 1153 |
+
else:
|
| 1154 |
+
image = latents
|
| 1155 |
+
|
| 1156 |
+
if not output_type == "latent":
|
| 1157 |
+
# apply watermark if available
|
| 1158 |
+
if self.watermark is not None:
|
| 1159 |
+
image = self.watermark.apply_watermark(image)
|
| 1160 |
+
|
| 1161 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1162 |
+
|
| 1163 |
+
# Offload all models
|
| 1164 |
+
self.maybe_free_model_hooks()
|
| 1165 |
+
|
| 1166 |
+
if not return_dict:
|
| 1167 |
+
return (image,)
|
| 1168 |
+
|
| 1169 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
| 1170 |
+
|
| 1171 |
+
@torch.no_grad()
|
| 1172 |
+
def prepare_for_ipex(
|
| 1173 |
+
self,
|
| 1174 |
+
dtype=torch.float32,
|
| 1175 |
+
prompt: Union[str, List[str]] = None,
|
| 1176 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 1177 |
+
height: Optional[int] = None,
|
| 1178 |
+
width: Optional[int] = None,
|
| 1179 |
+
num_inference_steps: int = 50,
|
| 1180 |
+
timesteps: List[int] = None,
|
| 1181 |
+
denoising_end: Optional[float] = None,
|
| 1182 |
+
guidance_scale: float = 5.0,
|
| 1183 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1184 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 1185 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1186 |
+
eta: float = 0.0,
|
| 1187 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1188 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 1189 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1190 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1191 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1192 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 1193 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 1194 |
+
output_type: Optional[str] = "pil",
|
| 1195 |
+
return_dict: bool = True,
|
| 1196 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1197 |
+
guidance_rescale: float = 0.0,
|
| 1198 |
+
original_size: Optional[Tuple[int, int]] = None,
|
| 1199 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 1200 |
+
target_size: Optional[Tuple[int, int]] = None,
|
| 1201 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 1202 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 1203 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 1204 |
+
clip_skip: Optional[int] = None,
|
| 1205 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 1206 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 1207 |
+
**kwargs,
|
| 1208 |
+
):
|
| 1209 |
+
callback = kwargs.pop("callback", None)
|
| 1210 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 1211 |
+
|
| 1212 |
+
if callback is not None:
|
| 1213 |
+
deprecate(
|
| 1214 |
+
"callback",
|
| 1215 |
+
"1.0.0",
|
| 1216 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 1217 |
+
)
|
| 1218 |
+
if callback_steps is not None:
|
| 1219 |
+
deprecate(
|
| 1220 |
+
"callback_steps",
|
| 1221 |
+
"1.0.0",
|
| 1222 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 1223 |
+
)
|
| 1224 |
+
|
| 1225 |
+
# 0. Default height and width to unet
|
| 1226 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 1227 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 1228 |
+
|
| 1229 |
+
original_size = original_size or (height, width)
|
| 1230 |
+
target_size = target_size or (height, width)
|
| 1231 |
+
|
| 1232 |
+
# 1. Check inputs. Raise error if not correct
|
| 1233 |
+
self.check_inputs(
|
| 1234 |
+
prompt,
|
| 1235 |
+
prompt_2,
|
| 1236 |
+
height,
|
| 1237 |
+
width,
|
| 1238 |
+
callback_steps,
|
| 1239 |
+
negative_prompt,
|
| 1240 |
+
negative_prompt_2,
|
| 1241 |
+
prompt_embeds,
|
| 1242 |
+
negative_prompt_embeds,
|
| 1243 |
+
pooled_prompt_embeds,
|
| 1244 |
+
negative_pooled_prompt_embeds,
|
| 1245 |
+
callback_on_step_end_tensor_inputs,
|
| 1246 |
+
)
|
| 1247 |
+
|
| 1248 |
+
self._guidance_scale = guidance_scale
|
| 1249 |
+
self._guidance_rescale = guidance_rescale
|
| 1250 |
+
self._clip_skip = clip_skip
|
| 1251 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 1252 |
+
self._denoising_end = denoising_end
|
| 1253 |
+
|
| 1254 |
+
# 2. Define call parameters
|
| 1255 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1256 |
+
batch_size = 1
|
| 1257 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1258 |
+
batch_size = len(prompt)
|
| 1259 |
+
else:
|
| 1260 |
+
batch_size = prompt_embeds.shape[0]
|
| 1261 |
+
|
| 1262 |
+
device = "cpu"
|
| 1263 |
+
do_classifier_free_guidance = self.do_classifier_free_guidance
|
| 1264 |
+
|
| 1265 |
+
# 3. Encode input prompt
|
| 1266 |
+
lora_scale = (
|
| 1267 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 1268 |
+
)
|
| 1269 |
+
|
| 1270 |
+
(
|
| 1271 |
+
prompt_embeds,
|
| 1272 |
+
negative_prompt_embeds,
|
| 1273 |
+
pooled_prompt_embeds,
|
| 1274 |
+
negative_pooled_prompt_embeds,
|
| 1275 |
+
) = self.encode_prompt(
|
| 1276 |
+
prompt=prompt,
|
| 1277 |
+
prompt_2=prompt_2,
|
| 1278 |
+
device=device,
|
| 1279 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1280 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1281 |
+
negative_prompt=negative_prompt,
|
| 1282 |
+
negative_prompt_2=negative_prompt_2,
|
| 1283 |
+
prompt_embeds=prompt_embeds,
|
| 1284 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1285 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1286 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1287 |
+
lora_scale=lora_scale,
|
| 1288 |
+
clip_skip=self.clip_skip,
|
| 1289 |
+
)
|
| 1290 |
+
|
| 1291 |
+
# 5. Prepare latent variables
|
| 1292 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1293 |
+
latents = self.prepare_latents(
|
| 1294 |
+
batch_size * num_images_per_prompt,
|
| 1295 |
+
num_channels_latents,
|
| 1296 |
+
height,
|
| 1297 |
+
width,
|
| 1298 |
+
prompt_embeds.dtype,
|
| 1299 |
+
device,
|
| 1300 |
+
generator,
|
| 1301 |
+
latents,
|
| 1302 |
+
)
|
| 1303 |
+
|
| 1304 |
+
# 7. Prepare added time ids & embeddings
|
| 1305 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1306 |
+
if self.text_encoder_2 is None:
|
| 1307 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1308 |
+
else:
|
| 1309 |
+
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
|
| 1310 |
+
|
| 1311 |
+
add_time_ids = self._get_add_time_ids(
|
| 1312 |
+
original_size,
|
| 1313 |
+
crops_coords_top_left,
|
| 1314 |
+
target_size,
|
| 1315 |
+
dtype=prompt_embeds.dtype,
|
| 1316 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1317 |
+
)
|
| 1318 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 1319 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 1320 |
+
negative_original_size,
|
| 1321 |
+
negative_crops_coords_top_left,
|
| 1322 |
+
negative_target_size,
|
| 1323 |
+
dtype=prompt_embeds.dtype,
|
| 1324 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1325 |
+
)
|
| 1326 |
+
else:
|
| 1327 |
+
negative_add_time_ids = add_time_ids
|
| 1328 |
+
|
| 1329 |
+
if self.do_classifier_free_guidance:
|
| 1330 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1331 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1332 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 1333 |
+
|
| 1334 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1335 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1336 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1337 |
+
|
| 1338 |
+
if ip_adapter_image is not None:
|
| 1339 |
+
image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
|
| 1340 |
+
if self.do_classifier_free_guidance:
|
| 1341 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 1342 |
+
image_embeds = image_embeds.to(device)
|
| 1343 |
+
|
| 1344 |
+
dummy = torch.ones(1, dtype=torch.int32)
|
| 1345 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1346 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, dummy)
|
| 1347 |
+
|
| 1348 |
+
# predict the noise residual
|
| 1349 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 1350 |
+
if ip_adapter_image is not None:
|
| 1351 |
+
added_cond_kwargs["image_embeds"] = image_embeds
|
| 1352 |
+
|
| 1353 |
+
if not output_type == "latent":
|
| 1354 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1355 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1356 |
+
|
| 1357 |
+
if needs_upcasting:
|
| 1358 |
+
self.upcast_vae()
|
| 1359 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1360 |
+
|
| 1361 |
+
# cast back to fp16 if needed
|
| 1362 |
+
if needs_upcasting:
|
| 1363 |
+
self.vae.to(dtype=torch.float16)
|
| 1364 |
+
|
| 1365 |
+
self.unet = self.unet.to(memory_format=torch.channels_last)
|
| 1366 |
+
self.vae.decoder = self.vae.decoder.to(memory_format=torch.channels_last)
|
| 1367 |
+
self.text_encoder = self.text_encoder.to(memory_format=torch.channels_last)
|
| 1368 |
+
|
| 1369 |
+
unet_input_example = {
|
| 1370 |
+
"sample": latent_model_input,
|
| 1371 |
+
"timestep": dummy,
|
| 1372 |
+
"encoder_hidden_states": prompt_embeds,
|
| 1373 |
+
"added_cond_kwargs": added_cond_kwargs,
|
| 1374 |
+
}
|
| 1375 |
+
|
| 1376 |
+
vae_decoder_input_example = latents
|
| 1377 |
+
|
| 1378 |
+
# optimize with ipex
|
| 1379 |
+
if dtype == torch.bfloat16:
|
| 1380 |
+
self.unet = ipex.optimize(
|
| 1381 |
+
self.unet.eval(),
|
| 1382 |
+
dtype=torch.bfloat16,
|
| 1383 |
+
inplace=True,
|
| 1384 |
+
)
|
| 1385 |
+
self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True)
|
| 1386 |
+
self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
|
| 1387 |
+
elif dtype == torch.float32:
|
| 1388 |
+
self.unet = ipex.optimize(
|
| 1389 |
+
self.unet.eval(),
|
| 1390 |
+
dtype=torch.float32,
|
| 1391 |
+
inplace=True,
|
| 1392 |
+
level="O1",
|
| 1393 |
+
weights_prepack=True,
|
| 1394 |
+
auto_kernel_selection=False,
|
| 1395 |
+
)
|
| 1396 |
+
self.vae.decoder = ipex.optimize(
|
| 1397 |
+
self.vae.decoder.eval(),
|
| 1398 |
+
dtype=torch.float32,
|
| 1399 |
+
inplace=True,
|
| 1400 |
+
level="O1",
|
| 1401 |
+
weights_prepack=True,
|
| 1402 |
+
auto_kernel_selection=False,
|
| 1403 |
+
)
|
| 1404 |
+
self.text_encoder = ipex.optimize(
|
| 1405 |
+
self.text_encoder.eval(),
|
| 1406 |
+
dtype=torch.float32,
|
| 1407 |
+
inplace=True,
|
| 1408 |
+
level="O1",
|
| 1409 |
+
weights_prepack=True,
|
| 1410 |
+
auto_kernel_selection=False,
|
| 1411 |
+
)
|
| 1412 |
+
else:
|
| 1413 |
+
raise ValueError(" The value of 'dtype' should be 'torch.bfloat16' or 'torch.float32' !")
|
| 1414 |
+
|
| 1415 |
+
# trace unet model to get better performance on IPEX
|
| 1416 |
+
with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
|
| 1417 |
+
unet_trace_model = torch.jit.trace(
|
| 1418 |
+
self.unet, example_kwarg_inputs=unet_input_example, check_trace=False, strict=False
|
| 1419 |
+
)
|
| 1420 |
+
unet_trace_model = torch.jit.freeze(unet_trace_model)
|
| 1421 |
+
self.unet.forward = unet_trace_model.forward
|
| 1422 |
+
|
| 1423 |
+
# trace vae.decoder model to get better performance on IPEX
|
| 1424 |
+
with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
|
| 1425 |
+
vae_decoder_trace_model = torch.jit.trace(
|
| 1426 |
+
self.vae.decoder, vae_decoder_input_example, check_trace=False, strict=False
|
| 1427 |
+
)
|
| 1428 |
+
vae_decoder_trace_model = torch.jit.freeze(vae_decoder_trace_model)
|
| 1429 |
+
self.vae.decoder.forward = vae_decoder_trace_model.forward
|
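
# Illustrative usage sketch (added; not part of the original file). The model id, prompt and sizes are
# placeholders, and the custom pipeline name assumes this file is loaded as a community pipeline:
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0",
#       custom_pipeline="pipeline_stable_diffusion_xl_ipex",
#       torch_dtype=torch.bfloat16,
#   )
#   # trace/optimize the UNet, VAE decoder and text encoder for IPEX at the target resolution
#   pipe.prepare_for_ipex(torch.bfloat16, prompt="an astronaut riding a horse", height=1024, width=1024)
#   with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
#       image = pipe(prompt="an astronaut riding a horse", height=1024, width=1024, num_inference_steps=20).images[0]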
v0.27.0/pipeline_zero1to3.py
ADDED
|
@@ -0,0 +1,788 @@
| 1 |
+
# A diffusers implementation of Zero1to3 (https://github.com/cvlab-columbia/zero123), ICCV 2023
|
| 2 |
+
# by Xin Kong
|
| 3 |
+
|
| 4 |
+
import inspect
|
| 5 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 6 |
+
|
| 7 |
+
import kornia
|
| 8 |
+
import numpy as np
|
| 9 |
+
import PIL.Image
|
| 10 |
+
import torch
|
| 11 |
+
from packaging import version
|
| 12 |
+
from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection
|
| 13 |
+
|
| 14 |
+
# from ...configuration_utils import FrozenDict
|
| 15 |
+
# from ...models import AutoencoderKL, UNet2DConditionModel
|
| 16 |
+
# from ...schedulers import KarrasDiffusionSchedulers
|
| 17 |
+
# from ...utils import (
|
| 18 |
+
# deprecate,
|
| 19 |
+
# is_accelerate_available,
|
| 20 |
+
# is_accelerate_version,
|
| 21 |
+
# logging,
|
| 22 |
+
# randn_tensor,
|
| 23 |
+
# replace_example_docstring,
|
| 24 |
+
# )
|
| 25 |
+
# from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 26 |
+
# from . import StableDiffusionPipelineOutput
|
| 27 |
+
# from .safety_checker import StableDiffusionSafetyChecker
|
| 28 |
+
from diffusers import AutoencoderKL, DiffusionPipeline, StableDiffusionMixin, UNet2DConditionModel
|
| 29 |
+
from diffusers.configuration_utils import ConfigMixin, FrozenDict
|
| 30 |
+
from diffusers.models.modeling_utils import ModelMixin
|
| 31 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
|
| 32 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 33 |
+
from diffusers.utils import (
|
| 34 |
+
deprecate,
|
| 35 |
+
logging,
|
| 36 |
+
replace_example_docstring,
|
| 37 |
+
)
|
| 38 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 42 |
+
# todo
|
| 43 |
+
EXAMPLE_DOC_STRING = """
|
| 44 |
+
Examples:
|
| 45 |
+
```py
|
| 46 |
+
>>> import torch
|
| 47 |
+
>>> from diffusers import StableDiffusionPipeline
|
| 48 |
+
|
| 49 |
+
>>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
| 50 |
+
>>> pipe = pipe.to("cuda")
|
| 51 |
+
|
| 52 |
+
>>> prompt = "a photo of an astronaut riding a horse on mars"
|
| 53 |
+
>>> image = pipe(prompt).images[0]
|
| 54 |
+
```
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class CCProjection(ModelMixin, ConfigMixin):
|
| 59 |
+
def __init__(self, in_channel=772, out_channel=768):
|
| 60 |
+
super().__init__()
|
| 61 |
+
self.in_channel = in_channel
|
| 62 |
+
self.out_channel = out_channel
|
| 63 |
+
self.projection = torch.nn.Linear(in_channel, out_channel)
|
| 64 |
+
|
| 65 |
+
def forward(self, x):
|
| 66 |
+
return self.projection(x)
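`CCProjection` is just a learned linear map that takes the 772-dimensional concatenation of a CLIP image embedding (768) and the 4-dimensional relative-pose embedding back down to the 768 dimensions the UNet cross-attention expects. A quick shape check with dummy tensors, assuming the default channel sizes above:

```py
import torch

proj = CCProjection(in_channel=772, out_channel=768)  # the class defined above

clip_emb = torch.randn(2, 1, 768)  # CLIP image embedding per sample
pose_emb = torch.randn(2, 1, 4)    # [elev_rad, sin(azim), cos(azim), radius], as built in _encode_pose
tokens = proj(torch.cat([clip_emb, pose_emb], dim=-1))
print(tokens.shape)  # torch.Size([2, 1, 768])
```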
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
|
| 70 |
+
r"""
|
| 71 |
+
Pipeline for single view conditioned novel view generation using Zero1to3.
|
| 72 |
+
|
| 73 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 74 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 75 |
+
|
| 76 |
+
Args:
|
| 77 |
+
vae ([`AutoencoderKL`]):
|
| 78 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 79 |
+
image_encoder ([`CLIPVisionModelWithProjection`]):
|
| 80 |
+
Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of
|
| 81 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection),
|
| 82 |
+
specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 83 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 84 |
+
scheduler ([`SchedulerMixin`]):
|
| 85 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 86 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 87 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 88 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 89 |
+
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
| 90 |
+
feature_extractor ([`CLIPFeatureExtractor`]):
|
| 91 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 92 |
+
cc_projection ([`CCProjection`]):
|
| 93 |
+
Projection layer to project the concatenated CLIP features and pose embeddings to the original CLIP feature size.
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 97 |
+
|
| 98 |
+
def __init__(
|
| 99 |
+
self,
|
| 100 |
+
vae: AutoencoderKL,
|
| 101 |
+
image_encoder: CLIPVisionModelWithProjection,
|
| 102 |
+
unet: UNet2DConditionModel,
|
| 103 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 104 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 105 |
+
feature_extractor: CLIPFeatureExtractor,
|
| 106 |
+
cc_projection: CCProjection,
|
| 107 |
+
requires_safety_checker: bool = True,
|
| 108 |
+
):
|
| 109 |
+
super().__init__()
|
| 110 |
+
|
| 111 |
+
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
| 112 |
+
deprecation_message = (
|
| 113 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 114 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 115 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 116 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 117 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 118 |
+
" file"
|
| 119 |
+
)
|
| 120 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 121 |
+
new_config = dict(scheduler.config)
|
| 122 |
+
new_config["steps_offset"] = 1
|
| 123 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 124 |
+
|
| 125 |
+
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
| 126 |
+
deprecation_message = (
|
| 127 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 128 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 129 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 130 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 131 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 132 |
+
)
|
| 133 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 134 |
+
new_config = dict(scheduler.config)
|
| 135 |
+
new_config["clip_sample"] = False
|
| 136 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 137 |
+
|
| 138 |
+
if safety_checker is None and requires_safety_checker:
|
| 139 |
+
logger.warning(
|
| 140 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 141 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 142 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 143 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 144 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 145 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
if safety_checker is not None and feature_extractor is None:
|
| 149 |
+
raise ValueError(
|
| 150 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 151 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
| 155 |
+
version.parse(unet.config._diffusers_version).base_version
|
| 156 |
+
) < version.parse("0.9.0.dev0")
|
| 157 |
+
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 158 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 159 |
+
deprecation_message = (
|
| 160 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 161 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 162 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 163 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
| 164 |
+
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 165 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 166 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 167 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 168 |
+
" the `unet/config.json` file"
|
| 169 |
+
)
|
| 170 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 171 |
+
new_config = dict(unet.config)
|
| 172 |
+
new_config["sample_size"] = 64
|
| 173 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 174 |
+
|
| 175 |
+
self.register_modules(
|
| 176 |
+
vae=vae,
|
| 177 |
+
image_encoder=image_encoder,
|
| 178 |
+
unet=unet,
|
| 179 |
+
scheduler=scheduler,
|
| 180 |
+
safety_checker=safety_checker,
|
| 181 |
+
feature_extractor=feature_extractor,
|
| 182 |
+
cc_projection=cc_projection,
|
| 183 |
+
)
|
| 184 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 185 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 186 |
+
# self.model_mode = None
|
| 187 |
+
|
| 188 |
+
def _encode_prompt(
|
| 189 |
+
self,
|
| 190 |
+
prompt,
|
| 191 |
+
device,
|
| 192 |
+
num_images_per_prompt,
|
| 193 |
+
do_classifier_free_guidance,
|
| 194 |
+
negative_prompt=None,
|
| 195 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 196 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 197 |
+
):
|
| 198 |
+
r"""
|
| 199 |
+
Encodes the prompt into text encoder hidden states.
|
| 200 |
+
|
| 201 |
+
Args:
|
| 202 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 203 |
+
prompt to be encoded
|
| 204 |
+
device: (`torch.device`):
|
| 205 |
+
torch device
|
| 206 |
+
num_images_per_prompt (`int`):
|
| 207 |
+
number of images that should be generated per prompt
|
| 208 |
+
do_classifier_free_guidance (`bool`):
|
| 209 |
+
whether to use classifier free guidance or not
|
| 210 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 211 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 212 |
+
`negative_prompt_embeds` instead.
|
| 213 |
+
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
|
| 214 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 215 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 216 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 217 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 218 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 219 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 220 |
+
argument.
|
| 221 |
+
"""
|
| 222 |
+
if prompt is not None and isinstance(prompt, str):
|
| 223 |
+
batch_size = 1
|
| 224 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 225 |
+
batch_size = len(prompt)
|
| 226 |
+
else:
|
| 227 |
+
batch_size = prompt_embeds.shape[0]
|
| 228 |
+
|
| 229 |
+
if prompt_embeds is None:
|
| 230 |
+
text_inputs = self.tokenizer(
|
| 231 |
+
prompt,
|
| 232 |
+
padding="max_length",
|
| 233 |
+
max_length=self.tokenizer.model_max_length,
|
| 234 |
+
truncation=True,
|
| 235 |
+
return_tensors="pt",
|
| 236 |
+
)
|
| 237 |
+
text_input_ids = text_inputs.input_ids
|
| 238 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 239 |
+
|
| 240 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 241 |
+
text_input_ids, untruncated_ids
|
| 242 |
+
):
|
| 243 |
+
removed_text = self.tokenizer.batch_decode(
|
| 244 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 245 |
+
)
|
| 246 |
+
logger.warning(
|
| 247 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 248 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 252 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 253 |
+
else:
|
| 254 |
+
attention_mask = None
|
| 255 |
+
|
| 256 |
+
prompt_embeds = self.text_encoder(
|
| 257 |
+
text_input_ids.to(device),
|
| 258 |
+
attention_mask=attention_mask,
|
| 259 |
+
)
|
| 260 |
+
prompt_embeds = prompt_embeds[0]
|
| 261 |
+
|
| 262 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
| 263 |
+
|
| 264 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 265 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 266 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 267 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 268 |
+
|
| 269 |
+
# get unconditional embeddings for classifier free guidance
|
| 270 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 271 |
+
uncond_tokens: List[str]
|
| 272 |
+
if negative_prompt is None:
|
| 273 |
+
uncond_tokens = [""] * batch_size
|
| 274 |
+
elif type(prompt) is not type(negative_prompt):
|
| 275 |
+
raise TypeError(
|
| 276 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 277 |
+
f" {type(prompt)}."
|
| 278 |
+
)
|
| 279 |
+
elif isinstance(negative_prompt, str):
|
| 280 |
+
uncond_tokens = [negative_prompt]
|
| 281 |
+
elif batch_size != len(negative_prompt):
|
| 282 |
+
raise ValueError(
|
| 283 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 284 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 285 |
+
" the batch size of `prompt`."
|
| 286 |
+
)
|
| 287 |
+
else:
|
| 288 |
+
uncond_tokens = negative_prompt
|
| 289 |
+
|
| 290 |
+
max_length = prompt_embeds.shape[1]
|
| 291 |
+
uncond_input = self.tokenizer(
|
| 292 |
+
uncond_tokens,
|
| 293 |
+
padding="max_length",
|
| 294 |
+
max_length=max_length,
|
| 295 |
+
truncation=True,
|
| 296 |
+
return_tensors="pt",
|
| 297 |
+
)
|
| 298 |
+
|
| 299 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 300 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 301 |
+
else:
|
| 302 |
+
attention_mask = None
|
| 303 |
+
|
| 304 |
+
negative_prompt_embeds = self.text_encoder(
|
| 305 |
+
uncond_input.input_ids.to(device),
|
| 306 |
+
attention_mask=attention_mask,
|
| 307 |
+
)
|
| 308 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 309 |
+
|
| 310 |
+
if do_classifier_free_guidance:
|
| 311 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 312 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 313 |
+
|
| 314 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
| 315 |
+
|
| 316 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 317 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 318 |
+
|
| 319 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 320 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 321 |
+
# to avoid doing two forward passes
|
| 322 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 323 |
+
|
| 324 |
+
return prompt_embeds
|
| 325 |
+
|
| 326 |
+
def CLIP_preprocess(self, x):
|
| 327 |
+
dtype = x.dtype
|
| 328 |
+
# following openai's implementation
|
| 329 |
+
# TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741
|
| 330 |
+
# follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608
|
| 331 |
+
if isinstance(x, torch.Tensor):
|
| 332 |
+
if x.min() < -1.0 or x.max() > 1.0:
|
| 333 |
+
raise ValueError("Expected input tensor to have values in the range [-1, 1]")
|
| 334 |
+
x = kornia.geometry.resize(
|
| 335 |
+
x.to(torch.float32), (224, 224), interpolation="bicubic", align_corners=True, antialias=False
|
| 336 |
+
).to(dtype=dtype)
|
| 337 |
+
x = (x + 1.0) / 2.0
|
| 338 |
+
# renormalize according to clip
|
| 339 |
+
x = kornia.enhance.normalize(
|
| 340 |
+
x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])
|
| 341 |
+
)
|
| 342 |
+
return x
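`CLIP_preprocess` intentionally reproduces OpenAI's original preprocessing (bicubic resize without antialiasing, then CLIP mean/std normalization) on tensors already in [-1, 1], rather than routing through the HF feature extractor. A rough torch-only equivalent using the same constants; swapping kornia for `F.interpolate` is an approximation, not a byte-exact match:

```py
import torch
import torch.nn.functional as F

CLIP_MEAN = torch.tensor([0.48145466, 0.4578275, 0.40821073]).view(1, 3, 1, 1)
CLIP_STD = torch.tensor([0.26862954, 0.26130258, 0.27577711]).view(1, 3, 1, 1)


def clip_preprocess_torch(x: torch.Tensor) -> torch.Tensor:
    # x: (B, 3, H, W) in [-1, 1]
    x = F.interpolate(x.float(), size=(224, 224), mode="bicubic", align_corners=True)
    x = (x + 1.0) / 2.0                # back to [0, 1]
    return (x - CLIP_MEAN) / CLIP_STD  # CLIP normalization


dummy = torch.rand(1, 3, 256, 256) * 2 - 1
print(clip_preprocess_torch(dummy).shape)  # torch.Size([1, 3, 224, 224])
```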
|
| 343 |
+
|
| 344 |
+
# from image_variation
|
| 345 |
+
def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
|
| 346 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 347 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 348 |
+
raise ValueError(
|
| 349 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 350 |
+
)
|
| 351 |
+
|
| 352 |
+
if isinstance(image, torch.Tensor):
|
| 353 |
+
# Batch single image
|
| 354 |
+
if image.ndim == 3:
|
| 355 |
+
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
|
| 356 |
+
image = image.unsqueeze(0)
|
| 357 |
+
|
| 358 |
+
assert image.ndim == 4, "Image must have 4 dimensions"
|
| 359 |
+
|
| 360 |
+
# Check image is in [-1, 1]
|
| 361 |
+
if image.min() < -1 or image.max() > 1:
|
| 362 |
+
raise ValueError("Image should be in [-1, 1] range")
|
| 363 |
+
else:
|
| 364 |
+
# preprocess image
|
| 365 |
+
if isinstance(image, (PIL.Image.Image, np.ndarray)):
|
| 366 |
+
image = [image]
|
| 367 |
+
|
| 368 |
+
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
|
| 369 |
+
image = [np.array(i.convert("RGB"))[None, :] for i in image]
|
| 370 |
+
image = np.concatenate(image, axis=0)
|
| 371 |
+
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
|
| 372 |
+
image = np.concatenate([i[None, :] for i in image], axis=0)
|
| 373 |
+
|
| 374 |
+
image = image.transpose(0, 3, 1, 2)
|
| 375 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 376 |
+
|
| 377 |
+
image = image.to(device=device, dtype=dtype)
|
| 378 |
+
|
| 379 |
+
image = self.CLIP_preprocess(image)
|
| 380 |
+
# if not isinstance(image, torch.Tensor):
|
| 381 |
+
# # 0-255
|
| 382 |
+
# print("Warning: image is processed by hf's preprocess, which is different from openai original's.")
|
| 383 |
+
# image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
|
| 384 |
+
image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)
|
| 385 |
+
image_embeddings = image_embeddings.unsqueeze(1)
|
| 386 |
+
|
| 387 |
+
# duplicate image embeddings for each generation per prompt, using mps friendly method
|
| 388 |
+
bs_embed, seq_len, _ = image_embeddings.shape
|
| 389 |
+
image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 390 |
+
image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 391 |
+
|
| 392 |
+
if do_classifier_free_guidance:
|
| 393 |
+
negative_prompt_embeds = torch.zeros_like(image_embeddings)
|
| 394 |
+
|
| 395 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 396 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 397 |
+
# to avoid doing two forward passes
|
| 398 |
+
image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
|
| 399 |
+
|
| 400 |
+
return image_embeddings
|
| 401 |
+
|
| 402 |
+
def _encode_pose(self, pose, device, num_images_per_prompt, do_classifier_free_guidance):
|
| 403 |
+
dtype = next(self.cc_projection.parameters()).dtype
|
| 404 |
+
if isinstance(pose, torch.Tensor):
|
| 405 |
+
pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)
|
| 406 |
+
else:
|
| 407 |
+
if isinstance(pose[0], list):
|
| 408 |
+
pose = torch.Tensor(pose)
|
| 409 |
+
else:
|
| 410 |
+
pose = torch.Tensor([pose])
|
| 411 |
+
x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)
|
| 412 |
+
pose_embeddings = (
|
| 413 |
+
torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)
|
| 414 |
+
.unsqueeze(1)
|
| 415 |
+
.to(device=device, dtype=dtype)
|
| 416 |
+
) # B, 1, 4
|
| 417 |
+
# duplicate pose embeddings for each generation per prompt, using mps friendly method
|
| 418 |
+
bs_embed, seq_len, _ = pose_embeddings.shape
|
| 419 |
+
pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 420 |
+
pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 421 |
+
if do_classifier_free_guidance:
|
| 422 |
+
negative_prompt_embeds = torch.zeros_like(pose_embeddings)
|
| 423 |
+
|
| 424 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 425 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 426 |
+
# to avoid doing two forward passes
|
| 427 |
+
pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])
|
| 428 |
+
return pose_embeddings
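`_encode_pose` reduces a relative camera pose given in degrees to the 4-vector `[deg2rad(x), sin(deg2rad(y)), cos(deg2rad(y)), z]` that gets concatenated with the CLIP image embedding. A standalone version of just that transform; the argument names are mine, following the usual Zero-1-to-3 convention that `x` is the elevation delta, `y` the azimuth delta, and `z` the radius delta:

```py
import torch


def pose_to_embedding(d_elev_deg: float, d_azim_deg: float, d_radius: float) -> torch.Tensor:
    x = torch.deg2rad(torch.tensor(d_elev_deg))
    y = torch.deg2rad(torch.tensor(d_azim_deg))
    return torch.stack([x, torch.sin(y), torch.cos(y), torch.tensor(d_radius)])


print(pose_to_embedding(-10.0, 45.0, 0.0))  # tensor of shape (4,), same layout as above
```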
|
| 429 |
+
|
| 430 |
+
def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_classifier_free_guidance):
|
| 431 |
+
img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)
|
| 432 |
+
pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)
|
| 433 |
+
prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)
|
| 434 |
+
prompt_embeds = self.cc_projection(prompt_embeds)
|
| 435 |
+
# prompt_embeds = img_prompt_embeds
|
| 436 |
+
# follow 0123, add negative prompt, after projection
|
| 437 |
+
if do_classifier_free_guidance:
|
| 438 |
+
negative_prompt = torch.zeros_like(prompt_embeds)
|
| 439 |
+
prompt_embeds = torch.cat([negative_prompt, prompt_embeds])
|
| 440 |
+
return prompt_embeds
|
| 441 |
+
|
| 442 |
+
def run_safety_checker(self, image, device, dtype):
|
| 443 |
+
if self.safety_checker is not None:
|
| 444 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
|
| 445 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 446 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 447 |
+
)
|
| 448 |
+
else:
|
| 449 |
+
has_nsfw_concept = None
|
| 450 |
+
return image, has_nsfw_concept
|
| 451 |
+
|
| 452 |
+
def decode_latents(self, latents):
|
| 453 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 454 |
+
image = self.vae.decode(latents).sample
|
| 455 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 456 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 457 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 458 |
+
return image
|
| 459 |
+
|
| 460 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 461 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 462 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 463 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 464 |
+
# and should be between [0, 1]
|
| 465 |
+
|
| 466 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 467 |
+
extra_step_kwargs = {}
|
| 468 |
+
if accepts_eta:
|
| 469 |
+
extra_step_kwargs["eta"] = eta
|
| 470 |
+
|
| 471 |
+
# check if the scheduler accepts generator
|
| 472 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 473 |
+
if accepts_generator:
|
| 474 |
+
extra_step_kwargs["generator"] = generator
|
| 475 |
+
return extra_step_kwargs
|
| 476 |
+
|
| 477 |
+
def check_inputs(self, image, height, width, callback_steps):
|
| 478 |
+
if (
|
| 479 |
+
not isinstance(image, torch.Tensor)
|
| 480 |
+
and not isinstance(image, PIL.Image.Image)
|
| 481 |
+
and not isinstance(image, list)
|
| 482 |
+
):
|
| 483 |
+
raise ValueError(
|
| 484 |
+
"`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
|
| 485 |
+
f" {type(image)}"
|
| 486 |
+
)
|
| 487 |
+
|
| 488 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 489 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 490 |
+
|
| 491 |
+
if (callback_steps is None) or (
|
| 492 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 493 |
+
):
|
| 494 |
+
raise ValueError(
|
| 495 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 496 |
+
f" {type(callback_steps)}."
|
| 497 |
+
)
|
| 498 |
+
|
| 499 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 500 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 501 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 502 |
+
raise ValueError(
|
| 503 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 504 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 505 |
+
)
|
| 506 |
+
|
| 507 |
+
if latents is None:
|
| 508 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 509 |
+
else:
|
| 510 |
+
latents = latents.to(device)
|
| 511 |
+
|
| 512 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 513 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 514 |
+
return latents
|
| 515 |
+
|
| 516 |
+
def prepare_img_latents(self, image, batch_size, dtype, device, generator=None, do_classifier_free_guidance=False):
|
| 517 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 518 |
+
raise ValueError(
|
| 519 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 520 |
+
)
|
| 521 |
+
|
| 522 |
+
if isinstance(image, torch.Tensor):
|
| 523 |
+
# Batch single image
|
| 524 |
+
if image.ndim == 3:
|
| 525 |
+
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
|
| 526 |
+
image = image.unsqueeze(0)
|
| 527 |
+
|
| 528 |
+
assert image.ndim == 4, "Image must have 4 dimensions"
|
| 529 |
+
|
| 530 |
+
# Check image is in [-1, 1]
|
| 531 |
+
if image.min() < -1 or image.max() > 1:
|
| 532 |
+
raise ValueError("Image should be in [-1, 1] range")
|
| 533 |
+
else:
|
| 534 |
+
# preprocess image
|
| 535 |
+
if isinstance(image, (PIL.Image.Image, np.ndarray)):
|
| 536 |
+
image = [image]
|
| 537 |
+
|
| 538 |
+
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
|
| 539 |
+
image = [np.array(i.convert("RGB"))[None, :] for i in image]
|
| 540 |
+
image = np.concatenate(image, axis=0)
|
| 541 |
+
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
|
| 542 |
+
image = np.concatenate([i[None, :] for i in image], axis=0)
|
| 543 |
+
|
| 544 |
+
image = image.transpose(0, 3, 1, 2)
|
| 545 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 546 |
+
|
| 547 |
+
image = image.to(device=device, dtype=dtype)
|
| 548 |
+
|
| 549 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 550 |
+
raise ValueError(
|
| 551 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 552 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 553 |
+
)
|
| 554 |
+
|
| 555 |
+
if isinstance(generator, list):
|
| 556 |
+
init_latents = [
|
| 557 |
+
self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i])
|
| 558 |
+
for i in range(batch_size) # sample
|
| 559 |
+
]
|
| 560 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 561 |
+
else:
|
| 562 |
+
init_latents = self.vae.encode(image).latent_dist.mode()
|
| 563 |
+
|
| 564 |
+
# init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor
|
| 565 |
+
if batch_size > init_latents.shape[0]:
|
| 566 |
+
# init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)
|
| 567 |
+
num_images_per_prompt = batch_size // init_latents.shape[0]
|
| 568 |
+
# duplicate image latents for each generation per prompt, using mps friendly method
|
| 569 |
+
bs_embed, emb_c, emb_h, emb_w = init_latents.shape
|
| 570 |
+
init_latents = init_latents.unsqueeze(1)
|
| 571 |
+
init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)
|
| 572 |
+
init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)
|
| 573 |
+
|
| 574 |
+
# init_latents = torch.cat([init_latents]*2) if do_classifier_free_guidance else init_latents # follow zero123
|
| 575 |
+
init_latents = (
|
| 576 |
+
torch.cat([torch.zeros_like(init_latents), init_latents]) if do_classifier_free_guidance else init_latents
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
init_latents = init_latents.to(device=device, dtype=dtype)
|
| 580 |
+
return init_latents
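`prepare_img_latents` encodes the conditioning view with the VAE distribution mode and, matching the original zero123 inference code, skips the usual `scaling_factor`; in the denoising loop those image latents are then concatenated channel-wise with the noisy latents, so the UNet receives an 8-channel input. A shape-only illustration with dummy tensors (no real VAE involved):

```py
import torch

noisy_latents = torch.randn(2, 4, 32, 32)  # standard 4-channel SD latents
img_latents = torch.randn(2, 4, 32, 32)    # VAE-encoded conditioning view
unet_input = torch.cat([noisy_latents, img_latents], dim=1)
print(unet_input.shape)  # torch.Size([2, 8, 32, 32]); the zero123 UNet expects 8 input channels
```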
|
| 581 |
+
|
| 582 |
+
# def load_cc_projection(self, pretrained_weights=None):
|
| 583 |
+
# self.cc_projection = torch.nn.Linear(772, 768)
|
| 584 |
+
# torch.nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
|
| 585 |
+
# torch.nn.init.zeros_(list(self.cc_projection.parameters())[1])
|
| 586 |
+
# if pretrained_weights is not None:
|
| 587 |
+
# self.cc_projection.load_state_dict(pretrained_weights)
|
| 588 |
+
|
| 589 |
+
@torch.no_grad()
|
| 590 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 591 |
+
def __call__(
|
| 592 |
+
self,
|
| 593 |
+
input_imgs: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 594 |
+
prompt_imgs: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 595 |
+
poses: Union[List[float], List[List[float]]] = None,
|
| 596 |
+
torch_dtype=torch.float32,
|
| 597 |
+
height: Optional[int] = None,
|
| 598 |
+
width: Optional[int] = None,
|
| 599 |
+
num_inference_steps: int = 50,
|
| 600 |
+
guidance_scale: float = 3.0,
|
| 601 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 602 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 603 |
+
eta: float = 0.0,
|
| 604 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 605 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 606 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 607 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 608 |
+
output_type: Optional[str] = "pil",
|
| 609 |
+
return_dict: bool = True,
|
| 610 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 611 |
+
callback_steps: int = 1,
|
| 612 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 613 |
+
controlnet_conditioning_scale: float = 1.0,
|
| 614 |
+
):
|
| 615 |
+
r"""
|
| 616 |
+
Function invoked when calling the pipeline for generation.
|
| 617 |
+
|
| 618 |
+
Args:
|
| 619 |
+
input_imgs (`PIL` or `List[PIL]`, *optional*):
|
| 620 |
+
The single input image for each 3D object
|
| 621 |
+
prompt_imgs (`PIL` or `List[PIL]`, *optional*):
|
| 622 |
+
Same as input_imgs, but will be used later as an image prompt condition, encoded by CLIP feature
|
| 623 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 624 |
+
The height in pixels of the generated image.
|
| 625 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 626 |
+
The width in pixels of the generated image.
|
| 627 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 628 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 629 |
+
expense of slower inference.
|
| 630 |
+
guidance_scale (`float`, *optional*, defaults to 3.0):
|
| 631 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 632 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 633 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 634 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 635 |
+
usually at the expense of lower image quality.
|
| 636 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 637 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 638 |
+
`negative_prompt_embeds` instead.
|
| 639 |
+
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
|
| 640 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 641 |
+
The number of images to generate per prompt.
|
| 642 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 643 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 644 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 645 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 646 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 647 |
+
to make generation deterministic.
|
| 648 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 649 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 650 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 651 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 652 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 653 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 654 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 655 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 656 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 657 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 658 |
+
argument.
|
| 659 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 660 |
+
The output format of the generated image. Choose between
|
| 661 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 662 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 663 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 664 |
+
plain tuple.
|
| 665 |
+
callback (`Callable`, *optional*):
|
| 666 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 667 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 668 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 669 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 670 |
+
called at every step.
|
| 671 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 672 |
+
A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
|
| 673 |
+
`self.processor` in
|
| 674 |
+
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
| 675 |
+
|
| 676 |
+
Examples:
|
| 677 |
+
|
| 678 |
+
Returns:
|
| 679 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 680 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 681 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 682 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 683 |
+
(nsfw) content, according to the `safety_checker`.
|
| 684 |
+
"""
|
| 685 |
+
# 0. Default height and width to unet
|
| 686 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 687 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 688 |
+
|
| 689 |
+
# 1. Check inputs. Raise error if not correct
|
| 690 |
+
# input_image = hint_imgs
|
| 691 |
+
self.check_inputs(input_imgs, height, width, callback_steps)
|
| 692 |
+
|
| 693 |
+
# 2. Define call parameters
|
| 694 |
+
if isinstance(input_imgs, PIL.Image.Image):
|
| 695 |
+
batch_size = 1
|
| 696 |
+
elif isinstance(input_imgs, list):
|
| 697 |
+
batch_size = len(input_imgs)
|
| 698 |
+
else:
|
| 699 |
+
batch_size = input_imgs.shape[0]
|
| 700 |
+
device = self._execution_device
|
| 701 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 702 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 703 |
+
# corresponds to doing no classifier free guidance.
|
| 704 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 705 |
+
|
| 706 |
+
# 3. Encode input image with pose as prompt
|
| 707 |
+
prompt_embeds = self._encode_image_with_pose(
|
| 708 |
+
prompt_imgs, poses, device, num_images_per_prompt, do_classifier_free_guidance
|
| 709 |
+
)
|
| 710 |
+
|
| 711 |
+
# 4. Prepare timesteps
|
| 712 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 713 |
+
timesteps = self.scheduler.timesteps
|
| 714 |
+
|
| 715 |
+
# 5. Prepare latent variables
|
| 716 |
+
latents = self.prepare_latents(
|
| 717 |
+
batch_size * num_images_per_prompt,
|
| 718 |
+
4,
|
| 719 |
+
height,
|
| 720 |
+
width,
|
| 721 |
+
prompt_embeds.dtype,
|
| 722 |
+
device,
|
| 723 |
+
generator,
|
| 724 |
+
latents,
|
| 725 |
+
)
|
| 726 |
+
|
| 727 |
+
# 6. Prepare image latents
|
| 728 |
+
img_latents = self.prepare_img_latents(
|
| 729 |
+
input_imgs,
|
| 730 |
+
batch_size * num_images_per_prompt,
|
| 731 |
+
prompt_embeds.dtype,
|
| 732 |
+
device,
|
| 733 |
+
generator,
|
| 734 |
+
do_classifier_free_guidance,
|
| 735 |
+
)
|
| 736 |
+
|
| 737 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 738 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 739 |
+
|
| 740 |
+
# 7. Denoising loop
|
| 741 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 742 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 743 |
+
for i, t in enumerate(timesteps):
|
| 744 |
+
# expand the latents if we are doing classifier free guidance
|
| 745 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 746 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 747 |
+
latent_model_input = torch.cat([latent_model_input, img_latents], dim=1)
|
| 748 |
+
|
| 749 |
+
# predict the noise residual
|
| 750 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
|
| 751 |
+
|
| 752 |
+
# perform guidance
|
| 753 |
+
if do_classifier_free_guidance:
|
| 754 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 755 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 756 |
+
|
| 757 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 758 |
+
# latents = self.scheduler.step(noise_pred.to(dtype=torch.float32), t, latents.to(dtype=torch.float32)).prev_sample.to(prompt_embeds.dtype)
|
| 759 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 760 |
+
|
| 761 |
+
# call the callback, if provided
|
| 762 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 763 |
+
progress_bar.update()
|
| 764 |
+
if callback is not None and i % callback_steps == 0:
|
| 765 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 766 |
+
callback(step_idx, t, latents)
|
| 767 |
+
|
| 768 |
+
# 8. Post-processing
|
| 769 |
+
has_nsfw_concept = None
|
| 770 |
+
if output_type == "latent":
|
| 771 |
+
image = latents
|
| 772 |
+
elif output_type == "pil":
|
| 773 |
+
# 8. Post-processing
|
| 774 |
+
image = self.decode_latents(latents)
|
| 775 |
+
# 10. Convert to PIL
|
| 776 |
+
image = self.numpy_to_pil(image)
|
| 777 |
+
else:
|
| 778 |
+
# 8. Post-processing
|
| 779 |
+
image = self.decode_latents(latents)
|
| 780 |
+
|
| 781 |
+
# Offload last model to CPU
|
| 782 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 783 |
+
self.final_offload_hook.offload()
|
| 784 |
+
|
| 785 |
+
if not return_dict:
|
| 786 |
+
return (image, has_nsfw_concept)
|
| 787 |
+
|
| 788 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
v0.27.0/regional_prompting_stable_diffusion.py
ADDED
|
@@ -0,0 +1,620 @@
|
| 1 |
+
import math
|
| 2 |
+
from typing import Dict, Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torchvision.transforms.functional as FF
|
| 6 |
+
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
| 7 |
+
|
| 8 |
+
from diffusers import StableDiffusionPipeline
|
| 9 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 10 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 11 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 12 |
+
from diffusers.utils import USE_PEFT_BACKEND
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
try:
|
| 16 |
+
from compel import Compel
|
| 17 |
+
except ImportError:
|
| 18 |
+
Compel = None
|
| 19 |
+
|
| 20 |
+
KCOMM = "ADDCOMM"
|
| 21 |
+
KBRK = "BREAK"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
|
| 25 |
+
r"""
|
| 26 |
+
Args for Regional Prompting Pipeline:
|
| 27 |
+
rp_args:dict
|
| 28 |
+
Required
|
| 29 |
+
rp_args["mode"]: cols, rows, prompt, prompt-ex
|
| 30 |
+
for cols, rows mode
|
| 31 |
+
rp_args["div"]: ex) 1;1;1(Divide into 3 regions)
|
| 32 |
+
for prompt, prompt-ex mode
|
| 33 |
+
rp_args["th"]: ex) 0.5,0.5,0.6 (threshold for prompt mode)
|
| 34 |
+
|
| 35 |
+
Optional
|
| 36 |
+
rp_args["save_mask"]: True/False (save masks in prompt mode)
|
| 37 |
+
|
| 38 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 39 |
+
|
| 40 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 41 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
vae ([`AutoencoderKL`]):
|
| 45 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 46 |
+
text_encoder ([`CLIPTextModel`]):
|
| 47 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 48 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 49 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 50 |
+
tokenizer (`CLIPTokenizer`):
|
| 51 |
+
Tokenizer of class
|
| 52 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 53 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 54 |
+
scheduler ([`SchedulerMixin`]):
|
| 55 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 56 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 57 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 58 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 59 |
+
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
|
| 60 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 61 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
def __init__(
|
| 65 |
+
self,
|
| 66 |
+
vae: AutoencoderKL,
|
| 67 |
+
text_encoder: CLIPTextModel,
|
| 68 |
+
tokenizer: CLIPTokenizer,
|
| 69 |
+
unet: UNet2DConditionModel,
|
| 70 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 71 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 72 |
+
feature_extractor: CLIPFeatureExtractor,
|
| 73 |
+
requires_safety_checker: bool = True,
|
| 74 |
+
):
|
| 75 |
+
super().__init__(
|
| 76 |
+
vae,
|
| 77 |
+
text_encoder,
|
| 78 |
+
tokenizer,
|
| 79 |
+
unet,
|
| 80 |
+
scheduler,
|
| 81 |
+
safety_checker,
|
| 82 |
+
feature_extractor,
|
| 83 |
+
requires_safety_checker,
|
| 84 |
+
)
|
| 85 |
+
self.register_modules(
|
| 86 |
+
vae=vae,
|
| 87 |
+
text_encoder=text_encoder,
|
| 88 |
+
tokenizer=tokenizer,
|
| 89 |
+
unet=unet,
|
| 90 |
+
scheduler=scheduler,
|
| 91 |
+
safety_checker=safety_checker,
|
| 92 |
+
feature_extractor=feature_extractor,
|
| 93 |
+
)
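Before the `__call__` implementation, a hedged usage sketch of the regional-prompting arguments documented above: sub-prompts are separated with `BREAK` and laid out according to `rp_args["mode"]` and `rp_args["div"]`. The base model id and the `custom_pipeline` name are assumptions (loading by file name is the usual community-pipeline pattern), and the return is assumed to follow the standard `StableDiffusionPipelineOutput` convention:

```py
import torch
from diffusers import DiffusionPipeline

# model id and custom_pipeline name are assumptions, not taken from this file
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="regional_prompting_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# three vertical regions ("cols" mode, div "1;1;1"), one sub-prompt per region
prompt = "a sunny beach BREAK a stormy sea BREAK a snowy mountain"
result = pipe(
    prompt=prompt,
    negative_prompt="low quality",
    height=512,
    width=512,
    guidance_scale=7.5,
    rp_args={"mode": "cols", "div": "1;1;1"},
)
result.images[0].save("regional.png")
```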
|
| 94 |
+
|
| 95 |
+
@torch.no_grad()
|
| 96 |
+
def __call__(
|
| 97 |
+
self,
|
| 98 |
+
prompt: str,
|
| 99 |
+
height: int = 512,
|
| 100 |
+
width: int = 512,
|
| 101 |
+
num_inference_steps: int = 50,
|
| 102 |
+
guidance_scale: float = 7.5,
|
| 103 |
+
negative_prompt: str = None,
|
| 104 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 105 |
+
eta: float = 0.0,
|
| 106 |
+
generator: Optional[torch.Generator] = None,
|
| 107 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 108 |
+
output_type: Optional[str] = "pil",
|
| 109 |
+
return_dict: bool = True,
|
| 110 |
+
rp_args: Dict[str, str] = None,
|
| 111 |
+
):
|
| 112 |
+
active = KBRK in prompt[0] if isinstance(prompt, list) else KBRK in prompt
|
| 113 |
+
if negative_prompt is None:
|
| 114 |
+
negative_prompt = "" if isinstance(prompt, str) else [""] * len(prompt)
|
| 115 |
+
|
| 116 |
+
device = self._execution_device
|
| 117 |
+
regions = 0
|
| 118 |
+
|
| 119 |
+
self.power = int(rp_args["power"]) if "power" in rp_args else 1
|
| 120 |
+
|
| 121 |
+
prompts = prompt if isinstance(prompt, list) else [prompt]
|
| 122 |
+
n_prompts = negative_prompt if isinstance(prompt, str) else [negative_prompt]
|
| 123 |
+
self.batch = batch = num_images_per_prompt * len(prompts)
|
| 124 |
+
all_prompts_cn, all_prompts_p = promptsmaker(prompts, num_images_per_prompt)
|
| 125 |
+
all_n_prompts_cn, _ = promptsmaker(n_prompts, num_images_per_prompt)
|
| 126 |
+
|
| 127 |
+
equal = len(all_prompts_cn) == len(all_n_prompts_cn)
|
| 128 |
+
|
| 129 |
+
if Compel:
|
| 130 |
+
compel = Compel(tokenizer=self.tokenizer, text_encoder=self.text_encoder)
|
| 131 |
+
|
| 132 |
+
def getcompelembs(prps):
|
| 133 |
+
embl = []
|
| 134 |
+
for prp in prps:
|
| 135 |
+
embl.append(compel.build_conditioning_tensor(prp))
|
| 136 |
+
return torch.cat(embl)
|
| 137 |
+
|
| 138 |
+
conds = getcompelembs(all_prompts_cn)
|
| 139 |
+
unconds = getcompelembs(all_n_prompts_cn)
|
| 140 |
+
embs = getcompelembs(prompts)
|
| 141 |
+
n_embs = getcompelembs(n_prompts)
|
| 142 |
+
prompt = negative_prompt = None
|
| 143 |
+
else:
|
| 144 |
+
conds = self.encode_prompt(prompts, device, 1, True)[0]
|
| 145 |
+
unconds = (
|
| 146 |
+
self.encode_prompt(n_prompts, device, 1, True)[0]
|
| 147 |
+
if equal
|
| 148 |
+
else self.encode_prompt(all_n_prompts_cn, device, 1, True)[0]
|
| 149 |
+
)
|
| 150 |
+
embs = n_embs = None
|
| 151 |
+
|
| 152 |
+
if not active:
|
| 153 |
+
pcallback = None
|
| 154 |
+
mode = None
|
| 155 |
+
else:
|
| 156 |
+
if any(x in rp_args["mode"].upper() for x in ["COL", "ROW"]):
|
| 157 |
+
mode = "COL" if "COL" in rp_args["mode"].upper() else "ROW"
|
| 158 |
+
ocells, icells, regions = make_cells(rp_args["div"])
|
| 159 |
+
|
| 160 |
+
elif "PRO" in rp_args["mode"].upper():
|
| 161 |
+
regions = len(all_prompts_p[0])
|
| 162 |
+
mode = "PROMPT"
|
| 163 |
+
reset_attnmaps(self)
|
| 164 |
+
self.ex = "EX" in rp_args["mode"].upper()
|
| 165 |
+
self.target_tokens = target_tokens = tokendealer(self, all_prompts_p)
|
| 166 |
+
thresholds = [float(x) for x in rp_args["th"].split(",")]
|
| 167 |
+
|
| 168 |
+
orig_hw = (height, width)
|
| 169 |
+
revers = True
|
| 170 |
+
|
| 171 |
+
def pcallback(s_self, step: int, timestep: int, latents: torch.FloatTensor, selfs=None):
|
| 172 |
+
if "PRO" in mode: # in Prompt mode, make masks from sum of attension maps
|
| 173 |
+
self.step = step
|
| 174 |
+
|
| 175 |
+
if len(self.attnmaps_sizes) > 3:
|
| 176 |
+
self.history[step] = self.attnmaps.copy()
|
| 177 |
+
for hw in self.attnmaps_sizes:
|
| 178 |
+
allmasks = []
|
| 179 |
+
basemasks = [None] * batch
|
| 180 |
+
for tt, th in zip(target_tokens, thresholds):
|
| 181 |
+
for b in range(batch):
|
| 182 |
+
key = f"{tt}-{b}"
|
| 183 |
+
_, mask, _ = makepmask(self, self.attnmaps[key], hw[0], hw[1], th, step)
|
| 184 |
+
mask = mask.unsqueeze(0).unsqueeze(-1)
|
| 185 |
+
if self.ex:
|
| 186 |
+
allmasks[b::batch] = [x - mask for x in allmasks[b::batch]]
|
| 187 |
+
allmasks[b::batch] = [torch.where(x > 0, 1, 0) for x in allmasks[b::batch]]
|
| 188 |
+
allmasks.append(mask)
|
| 189 |
+
basemasks[b] = mask if basemasks[b] is None else basemasks[b] + mask
|
| 190 |
+
basemasks = [1 - mask for mask in basemasks]
|
| 191 |
+
basemasks = [torch.where(x > 0, 1, 0) for x in basemasks]
|
| 192 |
+
allmasks = basemasks + allmasks
|
| 193 |
+
|
| 194 |
+
self.attnmasks[hw] = torch.cat(allmasks)
|
| 195 |
+
self.maskready = True
|
| 196 |
+
return latents
|
| 197 |
+
|
| 198 |
+
def hook_forward(module):
|
| 199 |
+
# diffusers==0.23.2
|
| 200 |
+
def forward(
|
| 201 |
+
hidden_states: torch.FloatTensor,
|
| 202 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
| 203 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 204 |
+
temb: Optional[torch.FloatTensor] = None,
|
| 205 |
+
scale: float = 1.0,
|
| 206 |
+
) -> torch.Tensor:
|
| 207 |
+
attn = module
|
| 208 |
+
xshape = hidden_states.shape
|
| 209 |
+
self.hw = (h, w) = split_dims(xshape[1], *orig_hw)
|
| 210 |
+
|
| 211 |
+
if revers:
|
| 212 |
+
nx, px = hidden_states.chunk(2)
|
| 213 |
+
else:
|
| 214 |
+
px, nx = hidden_states.chunk(2)
|
| 215 |
+
|
| 216 |
+
if equal:
|
| 217 |
+
hidden_states = torch.cat(
|
| 218 |
+
[px for i in range(regions)] + [nx for i in range(regions)],
|
| 219 |
+
0,
|
| 220 |
+
)
|
| 221 |
+
encoder_hidden_states = torch.cat([conds] + [unconds])
|
| 222 |
+
else:
|
| 223 |
+
hidden_states = torch.cat([px for i in range(regions)] + [nx], 0)
|
| 224 |
+
encoder_hidden_states = torch.cat([conds] + [unconds])
|
| 225 |
+
|
| 226 |
+
residual = hidden_states
|
| 227 |
+
|
| 228 |
+
args = () if USE_PEFT_BACKEND else (scale,)
|
| 229 |
+
|
| 230 |
+
if attn.spatial_norm is not None:
|
| 231 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 232 |
+
|
| 233 |
+
input_ndim = hidden_states.ndim
|
| 234 |
+
|
| 235 |
+
if input_ndim == 4:
|
| 236 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 237 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 238 |
+
|
| 239 |
+
batch_size, sequence_length, _ = (
|
| 240 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
if attention_mask is not None:
|
| 244 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 245 |
+
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
|
| 246 |
+
|
| 247 |
+
if attn.group_norm is not None:
|
| 248 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 249 |
+
|
| 250 |
+
args = () if USE_PEFT_BACKEND else (scale,)
|
| 251 |
+
query = attn.to_q(hidden_states, *args)
|
| 252 |
+
|
| 253 |
+
if encoder_hidden_states is None:
|
| 254 |
+
encoder_hidden_states = hidden_states
|
| 255 |
+
elif attn.norm_cross:
|
| 256 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 257 |
+
|
| 258 |
+
key = attn.to_k(encoder_hidden_states, *args)
|
| 259 |
+
value = attn.to_v(encoder_hidden_states, *args)
|
| 260 |
+
|
| 261 |
+
inner_dim = key.shape[-1]
|
| 262 |
+
head_dim = inner_dim // attn.heads
|
| 263 |
+
|
| 264 |
+
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 265 |
+
|
| 266 |
+
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 267 |
+
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 268 |
+
|
| 269 |
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
| 270 |
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
| 271 |
+
hidden_states = scaled_dot_product_attention(
|
| 272 |
+
self,
|
| 273 |
+
query,
|
| 274 |
+
key,
|
| 275 |
+
value,
|
| 276 |
+
attn_mask=attention_mask,
|
| 277 |
+
dropout_p=0.0,
|
| 278 |
+
is_causal=False,
|
| 279 |
+
getattn="PRO" in mode,
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
| 283 |
+
hidden_states = hidden_states.to(query.dtype)
|
| 284 |
+
|
| 285 |
+
# linear proj
|
| 286 |
+
hidden_states = attn.to_out[0](hidden_states, *args)
|
| 287 |
+
# dropout
|
| 288 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 289 |
+
|
| 290 |
+
if input_ndim == 4:
|
| 291 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 292 |
+
|
| 293 |
+
if attn.residual_connection:
|
| 294 |
+
hidden_states = hidden_states + residual
|
| 295 |
+
|
| 296 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 297 |
+
|
| 298 |
+
#### Regional Prompting Col/Row mode
|
| 299 |
+
if any(x in mode for x in ["COL", "ROW"]):
|
| 300 |
+
reshaped = hidden_states.reshape(hidden_states.size()[0], h, w, hidden_states.size()[2])
|
| 301 |
+
center = reshaped.shape[0] // 2
|
| 302 |
+
px = reshaped[0:center] if equal else reshaped[0:-batch]
|
| 303 |
+
nx = reshaped[center:] if equal else reshaped[-batch:]
|
| 304 |
+
outs = [px, nx] if equal else [px]
|
| 305 |
+
for out in outs:
|
| 306 |
+
c = 0
|
| 307 |
+
for i, ocell in enumerate(ocells):
|
| 308 |
+
for icell in icells[i]:
|
| 309 |
+
if "ROW" in mode:
|
| 310 |
+
out[
|
| 311 |
+
0:batch,
|
| 312 |
+
int(h * ocell[0]) : int(h * ocell[1]),
|
| 313 |
+
int(w * icell[0]) : int(w * icell[1]),
|
| 314 |
+
:,
|
| 315 |
+
] = out[
|
| 316 |
+
c * batch : (c + 1) * batch,
|
| 317 |
+
int(h * ocell[0]) : int(h * ocell[1]),
|
| 318 |
+
int(w * icell[0]) : int(w * icell[1]),
|
| 319 |
+
:,
|
| 320 |
+
]
|
| 321 |
+
else:
|
| 322 |
+
out[
|
| 323 |
+
0:batch,
|
| 324 |
+
int(h * icell[0]) : int(h * icell[1]),
|
| 325 |
+
int(w * ocell[0]) : int(w * ocell[1]),
|
| 326 |
+
:,
|
| 327 |
+
] = out[
|
| 328 |
+
c * batch : (c + 1) * batch,
|
| 329 |
+
int(h * icell[0]) : int(h * icell[1]),
|
| 330 |
+
int(w * ocell[0]) : int(w * ocell[1]),
|
| 331 |
+
:,
|
| 332 |
+
]
|
| 333 |
+
c += 1
|
| 334 |
+
px, nx = (px[0:batch], nx[0:batch]) if equal else (px[0:batch], nx)
|
| 335 |
+
hidden_states = torch.cat([nx, px], 0) if revers else torch.cat([px, nx], 0)
|
| 336 |
+
hidden_states = hidden_states.reshape(xshape)
|
| 337 |
+
|
| 338 |
+
#### Regional Prompting Prompt mode
|
| 339 |
+
elif "PRO" in mode:
|
| 340 |
+
px, nx = (
|
| 341 |
+
torch.chunk(hidden_states) if equal else hidden_states[0:-batch],
|
| 342 |
+
hidden_states[-batch:],
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
if (h, w) in self.attnmasks and self.maskready:
|
| 346 |
+
|
| 347 |
+
def mask(input):
|
| 348 |
+
out = torch.multiply(input, self.attnmasks[(h, w)])
|
| 349 |
+
for b in range(batch):
|
| 350 |
+
for r in range(1, regions):
|
| 351 |
+
out[b] = out[b] + out[r * batch + b]
|
| 352 |
+
return out
|
| 353 |
+
|
| 354 |
+
px, nx = (mask(px), mask(nx)) if equal else (mask(px), nx)
|
| 355 |
+
px, nx = (px[0:batch], nx[0:batch]) if equal else (px[0:batch], nx)
|
| 356 |
+
hidden_states = torch.cat([nx, px], 0) if revers else torch.cat([px, nx], 0)
|
| 357 |
+
return hidden_states
|
| 358 |
+
|
| 359 |
+
return forward
|
| 360 |
+
|
| 361 |
+
def hook_forwards(root_module: torch.nn.Module):
|
| 362 |
+
for name, module in root_module.named_modules():
|
| 363 |
+
if "attn2" in name and module.__class__.__name__ == "Attention":
|
| 364 |
+
module.forward = hook_forward(module)
|
| 365 |
+
|
| 366 |
+
hook_forwards(self.unet)
|
| 367 |
+
|
| 368 |
+
output = StableDiffusionPipeline(**self.components)(
|
| 369 |
+
prompt=prompt,
|
| 370 |
+
prompt_embeds=embs,
|
| 371 |
+
negative_prompt=negative_prompt,
|
| 372 |
+
negative_prompt_embeds=n_embs,
|
| 373 |
+
height=height,
|
| 374 |
+
width=width,
|
| 375 |
+
num_inference_steps=num_inference_steps,
|
| 376 |
+
guidance_scale=guidance_scale,
|
| 377 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 378 |
+
eta=eta,
|
| 379 |
+
generator=generator,
|
| 380 |
+
latents=latents,
|
| 381 |
+
output_type=output_type,
|
| 382 |
+
return_dict=return_dict,
|
| 383 |
+
callback_on_step_end=pcallback,
|
| 384 |
+
)
|
| 385 |
+
|
| 386 |
+
if "save_mask" in rp_args:
|
| 387 |
+
save_mask = rp_args["save_mask"]
|
| 388 |
+
else:
|
| 389 |
+
save_mask = False
|
| 390 |
+
|
| 391 |
+
if mode == "PROMPT" and save_mask:
|
| 392 |
+
saveattnmaps(
|
| 393 |
+
self,
|
| 394 |
+
output,
|
| 395 |
+
height,
|
| 396 |
+
width,
|
| 397 |
+
thresholds,
|
| 398 |
+
num_inference_steps // 2,
|
| 399 |
+
regions,
|
| 400 |
+
)
|
| 401 |
+
|
| 402 |
+
return output


### Make prompt list for each regions
def promptsmaker(prompts, batch):
    out_p = []
    plen = len(prompts)
    for prompt in prompts:
        add = ""
        if KCOMM in prompt:
            add, prompt = prompt.split(KCOMM)
            add = add + " "
        prompts = prompt.split(KBRK)
        out_p.append([add + p for p in prompts])
    out = [None] * batch * len(out_p[0]) * len(out_p)
    for p, prs in enumerate(out_p):  # inputs prompts
        for r, pr in enumerate(prs):  # prompts for regions
            start = (p + r * plen) * batch
            out[start : start + batch] = [pr] * batch  # P1R1B1,P1R1B2...,P1R2B1,P1R2B2...,P2R1B1...
    return out, out_p
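
# Illustrative note (not part of the original file), assuming KBRK is bound to the
# literal region separator "BREAK" earlier in this pipeline: a call such as
#     promptsmaker(["blue sky BREAK green field"], 2)
# returns (up to surrounding whitespace) the flat list
#     ["blue sky", "blue sky", "green field", "green field"]
# i.e. each regional prompt repeated once per batch image, plus the nested
# per-prompt list [["blue sky", "green field"]].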


### make regions from ratios
### ";" makes outercells, "," makes inner cells
def make_cells(ratios):
    if ";" not in ratios and "," in ratios:
        ratios = ratios.replace(",", ";")
    ratios = ratios.split(";")
    ratios = [inratios.split(",") for inratios in ratios]

    icells = []
    ocells = []

    def startend(cells, array):
        current_start = 0
        array = [float(x) for x in array]
        for value in array:
            end = current_start + (value / sum(array))
            cells.append([current_start, end])
            current_start = end

    startend(ocells, [r[0] for r in ratios])

    for inratios in ratios:
        if 2 > len(inratios):
            icells.append([[0, 1]])
        else:
            add = []
            startend(add, inratios[1:])
            icells.append(add)

    return ocells, icells, sum(len(cell) for cell in icells)
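
# Illustrative note (not part of the original file): make_cells("1;1,1,2") yields
# two equal outer cells ([[0.0, 0.5], [0.5, 1.0]]); the first keeps a single inner
# cell ([[0, 1]]) and the second is divided 1:2 ([[0.0, 1/3], [1/3, 1.0]]), for a
# total of 3 regions. Whether outer cells are rows or columns is decided later by
# the "ROW"/"COL" mode flag.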


def make_emblist(self, prompts):
    with torch.no_grad():
        tokens = self.tokenizer(
            prompts,
            max_length=self.tokenizer.model_max_length,
            padding=True,
            truncation=True,
            return_tensors="pt",
        ).input_ids.to(self.device)
        embs = self.text_encoder(tokens, output_hidden_states=True).last_hidden_state.to(self.device, dtype=self.dtype)
    return embs


def split_dims(xs, height, width):
    xs = xs

    def repeat_div(x, y):
        while y > 0:
            x = math.ceil(x / 2)
            y = y - 1
        return x

    scale = math.ceil(math.log2(math.sqrt(height * width / xs)))
    dsh = repeat_div(height, scale)
    dsw = repeat_div(width, scale)
    return dsh, dsw
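
# Illustrative note (not part of the original file): for a 512x512 generation and
# a hidden-state sequence length of 4096, height * width / xs = 64, so scale = 3
# and split_dims returns (64, 64), i.e. the spatial shape of that U-Net block.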


##### for prompt mode
def get_attn_maps(self, attn):
    height, width = self.hw
    target_tokens = self.target_tokens
    if (height, width) not in self.attnmaps_sizes:
        self.attnmaps_sizes.append((height, width))

    for b in range(self.batch):
        for t in target_tokens:
            power = self.power
            add = attn[b, :, :, t[0] : t[0] + len(t)] ** (power) * (self.attnmaps_sizes.index((height, width)) + 1)
            add = torch.sum(add, dim=2)
            key = f"{t}-{b}"
            if key not in self.attnmaps:
                self.attnmaps[key] = add
            else:
                if self.attnmaps[key].shape[1] != add.shape[1]:
                    add = add.view(8, height, width)
                    add = FF.resize(add, self.attnmaps_sizes[0], antialias=None)
                    add = add.reshape_as(self.attnmaps[key])

                self.attnmaps[key] = self.attnmaps[key] + add


def reset_attnmaps(self):  # init parameters in every batch
    self.step = 0
    self.attnmaps = {}  # made from attention maps
    self.attnmaps_sizes = []  # (height, width) set of U-Net blocks
    self.attnmasks = {}  # made from attnmaps for regions
    self.maskready = False
    self.history = {}


def saveattnmaps(self, output, h, w, th, step, regions):
    masks = []
    for i, mask in enumerate(self.history[step].values()):
        img, _, mask = makepmask(self, mask, h, w, th[i % len(th)], step)
        if self.ex:
            masks = [x - mask for x in masks]
            masks.append(mask)
            if len(masks) == regions - 1:
                output.images.extend([FF.to_pil_image(mask) for mask in masks])
                masks = []
        else:
            output.images.append(img)


def makepmask(
    self, mask, h, w, th, step
):  # make masks from attention cache return [for preview, for attention, for Latent]
    th = th - step * 0.005
    if 0.05 >= th:
        th = 0.05
    mask = torch.mean(mask, dim=0)
    mask = mask / mask.max().item()
    mask = torch.where(mask > th, 1, 0)
    mask = mask.float()
    mask = mask.view(1, *self.attnmaps_sizes[0])
    img = FF.to_pil_image(mask)
    img = img.resize((w, h))
    mask = FF.resize(mask, (h, w), interpolation=FF.InterpolationMode.NEAREST, antialias=None)
    lmask = mask
    mask = mask.reshape(h * w)
    mask = torch.where(mask > 0.1, 1, 0)
    return img, mask, lmask
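
# Illustrative note (not part of the original file): the effective threshold
# decays by 0.005 per denoising step and is clamped at 0.05, so a user threshold
# of 0.4 becomes 0.4 - 20 * 0.005 = 0.3 at step 20 and bottoms out at 0.05 from
# step 70 onwards, letting the region masks grow as sampling progresses.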


def tokendealer(self, all_prompts):
    for prompts in all_prompts:
        targets = [p.split(",")[-1] for p in prompts[1:]]
        tt = []

        for target in targets:
            ptokens = (
                self.tokenizer(
                    prompts,
                    max_length=self.tokenizer.model_max_length,
                    padding=True,
                    truncation=True,
                    return_tensors="pt",
                ).input_ids
            )[0]
            ttokens = (
                self.tokenizer(
                    target,
                    max_length=self.tokenizer.model_max_length,
                    padding=True,
                    truncation=True,
                    return_tensors="pt",
                ).input_ids
            )[0]

            tlist = []

            for t in range(ttokens.shape[0] - 2):
                for p in range(ptokens.shape[0]):
                    if ttokens[t + 1] == ptokens[p]:
                        tlist.append(p)
            if tlist != []:
                tt.append(tlist)

    return tt
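
# Illustrative note (not part of the original file): for every region prompt after
# the first, tokendealer takes the text after its last comma as the "target" word,
# tokenizes it, and records where those token ids (skipping the BOS token) occur in
# the full prompt's token sequence; those positions are the indices later read back
# in get_attn_maps() to build the per-region attention masks.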


def scaled_dot_product_attention(
    self,
    query,
    key,
    value,
    attn_mask=None,
    dropout_p=0.0,
    is_causal=False,
    scale=None,
    getattn=False,
) -> torch.Tensor:
    # Efficient implementation equivalent to the following:
    L, S = query.size(-2), key.size(-2)
    scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
    attn_bias = torch.zeros(L, S, dtype=query.dtype, device=self.device)
    if is_causal:
        assert attn_mask is None
        temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
        attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
        attn_bias.to(query.dtype)

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_mask.masked_fill_(attn_mask.logical_not(), float("-inf"))
        else:
            attn_bias += attn_mask
    attn_weight = query @ key.transpose(-2, -1) * scale_factor
    attn_weight += attn_bias
    attn_weight = torch.softmax(attn_weight, dim=-1)
    if getattn:
        get_attn_maps(self, attn_weight)
    attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
    return attn_weight @ value
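
# Illustrative note (not part of the original file): this mirrors the reference
# implementation of torch.nn.functional.scaled_dot_product_attention, with one
# extra hook: when getattn is True the softmaxed attention weights are handed to
# get_attn_maps() before the value projection, so prompt-mode region masks can be
# built from the attention maps without a second forward pass.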
v0.27.0/rerender_a_video.py
ADDED
@@ -0,0 +1,1194 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from gmflow.gmflow import GMFlow
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers.image_processor import VaeImageProcessor
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from diffusers.models.attention_processor import Attention, AttnProcessor
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.controlnet.pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput, deprecate, logging
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def coords_grid(b, h, w, homogeneous=False, device=None):
    y, x = torch.meshgrid(torch.arange(h), torch.arange(w))  # [H, W]

    stacks = [x, y]

    if homogeneous:
        ones = torch.ones_like(x)  # [H, W]
        stacks.append(ones)

    grid = torch.stack(stacks, dim=0).float()  # [2, H, W] or [3, H, W]

    grid = grid[None].repeat(b, 1, 1, 1)  # [B, 2, H, W] or [B, 3, H, W]

    if device is not None:
        grid = grid.to(device)

    return grid
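
# Illustrative note (not part of the original file): coords_grid(1, 2, 3) returns
# a [1, 2, 2, 3] tensor whose first channel holds x coordinates (0, 1, 2 per row)
# and whose second channel holds y coordinates (0 on the first row, 1 on the
# second), i.e. the identity sampling grid that the optical flow is added to.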


def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False):
    # img: [B, C, H, W]
    # sample_coords: [B, 2, H, W] in image scale
    if sample_coords.size(1) != 2:  # [B, H, W, 2]
        sample_coords = sample_coords.permute(0, 3, 1, 2)

    b, _, h, w = sample_coords.shape

    # Normalize to [-1, 1]
    x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1
    y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1

    grid = torch.stack([x_grid, y_grid], dim=-1)  # [B, H, W, 2]

    img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True)

    if return_mask:
        mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1)  # [B, H, W]

        return img, mask

    return img


def flow_warp(feature, flow, mask=False, mode="bilinear", padding_mode="zeros"):
    b, c, h, w = feature.size()
    assert flow.size(1) == 2

    grid = coords_grid(b, h, w).to(flow.device) + flow  # [B, 2, H, W]
    grid = grid.to(feature.dtype)
    return bilinear_sample(feature, grid, mode=mode, padding_mode=padding_mode, return_mask=mask)
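
# Illustrative note (not part of the original file): flow_warp(prev_frame, bwd_flow)
# resamples the previous frame along the backward flow so it lines up with the
# current frame; a zero flow field returns the input essentially unchanged (up to
# bilinear interpolation at the borders).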


def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5):
    # fwd_flow, bwd_flow: [B, 2, H, W]
    # alpha and beta values are following UnFlow
    # (https://arxiv.org/abs/1711.07837)
    assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4
    assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2
    flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1)  # [B, H, W]

    warped_bwd_flow = flow_warp(bwd_flow, fwd_flow)  # [B, 2, H, W]
    warped_fwd_flow = flow_warp(fwd_flow, bwd_flow)  # [B, 2, H, W]

    diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1)  # [B, H, W]
    diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)

    threshold = alpha * flow_mag + beta

    fwd_occ = (diff_fwd > threshold).float()  # [B, H, W]
    bwd_occ = (diff_bwd > threshold).float()

    return fwd_occ, bwd_occ
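
# Illustrative note (not part of the original file): a pixel is flagged as occluded
# when the forward flow and the warped backward flow do not cancel out, i.e.
# ||f_fwd + warp(f_bwd)|| > alpha * (||f_fwd|| + ||f_bwd||) + beta, following the
# UnFlow criterion referenced above; the returned maps are 1.0 where the flow is
# unreliable and 0.0 elsewhere.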


@torch.no_grad()
def get_warped_and_mask(flow_model, image1, image2, image3=None, pixel_consistency=False, device=None):
    if image3 is None:
        image3 = image1
    padder = InputPadder(image1.shape, padding_factor=8)
    image1, image2 = padder.pad(image1[None].to(device), image2[None].to(device))
    results_dict = flow_model(
        image1, image2, attn_splits_list=[2], corr_radius_list=[-1], prop_radius_list=[-1], pred_bidir_flow=True
    )
    flow_pr = results_dict["flow_preds"][-1]  # [B, 2, H, W]
    fwd_flow = padder.unpad(flow_pr[0]).unsqueeze(0)  # [1, 2, H, W]
    bwd_flow = padder.unpad(flow_pr[1]).unsqueeze(0)  # [1, 2, H, W]
    fwd_occ, bwd_occ = forward_backward_consistency_check(fwd_flow, bwd_flow)  # [1, H, W] float
    if pixel_consistency:
        warped_image1 = flow_warp(image1, bwd_flow)
        bwd_occ = torch.clamp(
            bwd_occ + (abs(image2 - warped_image1).mean(dim=1) > 255 * 0.25).float(), 0, 1
        ).unsqueeze(0)
    warped_results = flow_warp(image3, bwd_flow)
    return warped_results, bwd_occ, bwd_flow
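
# Illustrative note (not part of the original file): `InputPadder` appears to come
# from the GMFlow codebase (it pads both frames to a multiple of 8 for the flow
# network and undoes the padding on the predicted flow); it is not defined or
# imported in this file, so the GMFlow utilities must be importable at runtime.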


blur = T.GaussianBlur(kernel_size=(9, 9), sigma=(18, 18))


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    Args:
        frames (`List[np.ndarray]` or `torch.FloatTensor`)
            List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or as
            a `torch` tensor. The length of the list denotes the video length (the number of frames).
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]


@torch.no_grad()
def find_flat_region(mask):
    device = mask.device
    kernel_x = torch.Tensor([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]).unsqueeze(0).unsqueeze(0).to(device)
    kernel_y = torch.Tensor([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]).unsqueeze(0).unsqueeze(0).to(device)
    mask_ = F.pad(mask.unsqueeze(0), (1, 1, 1, 1), mode="replicate")

    grad_x = torch.nn.functional.conv2d(mask_, kernel_x)
    grad_y = torch.nn.functional.conv2d(mask_, kernel_y)
    return ((abs(grad_x) + abs(grad_y)) == 0).float()[0]
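
# Illustrative note (not part of the original file): the two kernels are
# Prewitt-style horizontal and vertical gradient filters, so the returned map is
# 1.0 exactly where the occlusion mask is locally constant (flat) and 0.0 along
# its edges.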


class AttnState:
    STORE = 0
    LOAD = 1
    LOAD_AND_STORE_PREV = 2

    def __init__(self):
        self.reset()

    @property
    def state(self):
        return self.__state

    @property
    def timestep(self):
        return self.__timestep

    def set_timestep(self, t):
        self.__timestep = t

    def reset(self):
        self.__state = AttnState.STORE
        self.__timestep = 0

    def to_load(self):
        self.__state = AttnState.LOAD

    def to_load_and_store_prev(self):
        self.__state = AttnState.LOAD_AND_STORE_PREV


class CrossFrameAttnProcessor(AttnProcessor):
    """
    Cross frame attention processor. Each frame attends to the first frame and the previous frame.

    Args:
        attn_state: Whether the model is processing the first frame or an intermediate frame
    """

    def __init__(self, attn_state: AttnState):
        super().__init__()
        self.attn_state = attn_state
        self.first_maps = {}
        self.prev_maps = {}

    def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):
        # Is self attention
        if encoder_hidden_states is None:
            t = self.attn_state.timestep
            if self.attn_state.state == AttnState.STORE:
                self.first_maps[t] = hidden_states.detach()
                self.prev_maps[t] = hidden_states.detach()
                res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
            else:
                if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
                    tmp = hidden_states.detach()
                cross_map = torch.cat((self.first_maps[t], self.prev_maps[t]), dim=1)
                res = super().__call__(attn, hidden_states, cross_map, attention_mask, temb)
                if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
                    self.prev_maps[t] = tmp
        else:
            res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)

        return res
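
# Illustrative note (not part of the original file): in STORE mode (first frame)
# the processor runs plain self-attention and caches the hidden states per
# timestep; for later frames it concatenates the cached first-frame and
# previous-frame states along the token dimension and uses them as the attention
# context, which is what keeps the re-rendered frames temporally consistent.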


def prepare_image(image):
    if isinstance(image, torch.Tensor):
        # Batch single image
        if image.ndim == 3:
            image = image.unsqueeze(0)

        image = image.to(dtype=torch.float32)
    else:
        # preprocess image
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]

        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            image = [np.array(i.convert("RGB"))[None, :] for i in image]
            image = np.concatenate(image, axis=0)
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in image], axis=0)

        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

    return image
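
# Illustrative note (not part of the original file): PIL/NumPy inputs come out as a
# [B, 3, H, W] float tensor rescaled from [0, 255] to [-1, 1] (x / 127.5 - 1),
# whereas tensor inputs are only batched and cast to float32, so callers passing
# tensors are expected to provide them already normalized.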
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
|
| 255 |
+
r"""
|
| 256 |
+
Pipeline for video-to-video translation using Stable Diffusion with Rerender Algorithm.
|
| 257 |
+
|
| 258 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 259 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 260 |
+
|
| 261 |
+
In addition the pipeline inherits the following loading methods:
|
| 262 |
+
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
|
| 263 |
+
|
| 264 |
+
Args:
|
| 265 |
+
vae ([`AutoencoderKL`]):
|
| 266 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 267 |
+
text_encoder ([`CLIPTextModel`]):
|
| 268 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 269 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 270 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 271 |
+
tokenizer (`CLIPTokenizer`):
|
| 272 |
+
Tokenizer of class
|
| 273 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 274 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 275 |
+
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
|
| 276 |
+
Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
|
| 277 |
+
as a list, the outputs from each ControlNet are added together to create one combined additional
|
| 278 |
+
conditioning.
|
| 279 |
+
scheduler ([`SchedulerMixin`]):
|
| 280 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 281 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 282 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 283 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 284 |
+
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
| 285 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 286 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 287 |
+
"""
|
| 288 |
+
|
| 289 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 290 |
+
|
| 291 |
+
def __init__(
|
| 292 |
+
self,
|
| 293 |
+
vae: AutoencoderKL,
|
| 294 |
+
text_encoder: CLIPTextModel,
|
| 295 |
+
tokenizer: CLIPTokenizer,
|
| 296 |
+
unet: UNet2DConditionModel,
|
| 297 |
+
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
|
| 298 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 299 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 300 |
+
feature_extractor: CLIPImageProcessor,
|
| 301 |
+
image_encoder=None,
|
| 302 |
+
requires_safety_checker: bool = True,
|
| 303 |
+
device=None,
|
| 304 |
+
):
|
| 305 |
+
super().__init__(
|
| 306 |
+
vae,
|
| 307 |
+
text_encoder,
|
| 308 |
+
tokenizer,
|
| 309 |
+
unet,
|
| 310 |
+
controlnet,
|
| 311 |
+
scheduler,
|
| 312 |
+
safety_checker,
|
| 313 |
+
feature_extractor,
|
| 314 |
+
image_encoder,
|
| 315 |
+
requires_safety_checker,
|
| 316 |
+
)
|
| 317 |
+
self.to(device)
|
| 318 |
+
|
| 319 |
+
if safety_checker is None and requires_safety_checker:
|
| 320 |
+
logger.warning(
|
| 321 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 322 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 323 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 324 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 325 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 326 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 327 |
+
)
|
| 328 |
+
|
| 329 |
+
if safety_checker is not None and feature_extractor is None:
|
| 330 |
+
raise ValueError(
|
| 331 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 332 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 333 |
+
)
|
| 334 |
+
|
| 335 |
+
if isinstance(controlnet, (list, tuple)):
|
| 336 |
+
controlnet = MultiControlNetModel(controlnet)
|
| 337 |
+
|
| 338 |
+
self.register_modules(
|
| 339 |
+
vae=vae,
|
| 340 |
+
text_encoder=text_encoder,
|
| 341 |
+
tokenizer=tokenizer,
|
| 342 |
+
unet=unet,
|
| 343 |
+
controlnet=controlnet,
|
| 344 |
+
scheduler=scheduler,
|
| 345 |
+
safety_checker=safety_checker,
|
| 346 |
+
feature_extractor=feature_extractor,
|
| 347 |
+
)
|
| 348 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 349 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 350 |
+
self.control_image_processor = VaeImageProcessor(
|
| 351 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 352 |
+
)
|
| 353 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 354 |
+
self.attn_state = AttnState()
|
| 355 |
+
attn_processor_dict = {}
|
| 356 |
+
for k in unet.attn_processors.keys():
|
| 357 |
+
if k.startswith("up"):
|
| 358 |
+
attn_processor_dict[k] = CrossFrameAttnProcessor(self.attn_state)
|
| 359 |
+
else:
|
| 360 |
+
attn_processor_dict[k] = AttnProcessor()
|
| 361 |
+
|
| 362 |
+
self.unet.set_attn_processor(attn_processor_dict)
|
| 363 |
+
|
| 364 |
+
flow_model = GMFlow(
|
| 365 |
+
feature_channels=128,
|
| 366 |
+
num_scales=1,
|
| 367 |
+
upsample_factor=8,
|
| 368 |
+
num_head=1,
|
| 369 |
+
attention_type="swin",
|
| 370 |
+
ffn_dim_expansion=4,
|
| 371 |
+
num_transformer_layers=6,
|
| 372 |
+
).to(self.device)
|
| 373 |
+
|
| 374 |
+
checkpoint = torch.utils.model_zoo.load_url(
|
| 375 |
+
"https://huggingface.co/Anonymous-sub/Rerender/resolve/main/models/gmflow_sintel-0c07dcb3.pth",
|
| 376 |
+
map_location=lambda storage, loc: storage,
|
| 377 |
+
)
|
| 378 |
+
weights = checkpoint["model"] if "model" in checkpoint else checkpoint
|
| 379 |
+
flow_model.load_state_dict(weights, strict=False)
|
| 380 |
+
flow_model.eval()
|
| 381 |
+
self.flow_model = flow_model
|
| 382 |
+
|
| 383 |
+
# Modified from src/diffusers/pipelines/controlnet/pipeline_controlnet.StableDiffusionControlNetImg2ImgPipeline.check_inputs
|
| 384 |
+
def check_inputs(
|
| 385 |
+
self,
|
| 386 |
+
prompt,
|
| 387 |
+
callback_steps,
|
| 388 |
+
negative_prompt=None,
|
| 389 |
+
prompt_embeds=None,
|
| 390 |
+
negative_prompt_embeds=None,
|
| 391 |
+
controlnet_conditioning_scale=1.0,
|
| 392 |
+
control_guidance_start=0.0,
|
| 393 |
+
control_guidance_end=1.0,
|
| 394 |
+
):
|
| 395 |
+
if (callback_steps is None) or (
|
| 396 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 397 |
+
):
|
| 398 |
+
raise ValueError(
|
| 399 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 400 |
+
f" {type(callback_steps)}."
|
| 401 |
+
)
|
| 402 |
+
|
| 403 |
+
if prompt is not None and prompt_embeds is not None:
|
| 404 |
+
raise ValueError(
|
| 405 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 406 |
+
" only forward one of the two."
|
| 407 |
+
)
|
| 408 |
+
elif prompt is None and prompt_embeds is None:
|
| 409 |
+
raise ValueError(
|
| 410 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 411 |
+
)
|
| 412 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 413 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 414 |
+
|
| 415 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 416 |
+
raise ValueError(
|
| 417 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 418 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 419 |
+
)
|
| 420 |
+
|
| 421 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 422 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 423 |
+
raise ValueError(
|
| 424 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 425 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 426 |
+
f" {negative_prompt_embeds.shape}."
|
| 427 |
+
)
|
| 428 |
+
|
| 429 |
+
# `prompt` needs more sophisticated handling when there are multiple
|
| 430 |
+
# conditionings.
|
| 431 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 432 |
+
if isinstance(prompt, list):
|
| 433 |
+
logger.warning(
|
| 434 |
+
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
|
| 435 |
+
" prompts. The conditionings will be fixed across the prompts."
|
| 436 |
+
)
|
| 437 |
+
|
| 438 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 439 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 440 |
+
)
|
| 441 |
+
|
| 442 |
+
# Check `controlnet_conditioning_scale`
|
| 443 |
+
if (
|
| 444 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 445 |
+
or is_compiled
|
| 446 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 447 |
+
):
|
| 448 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 449 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 450 |
+
elif (
|
| 451 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 452 |
+
or is_compiled
|
| 453 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 454 |
+
):
|
| 455 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 456 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 457 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 458 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 459 |
+
self.controlnet.nets
|
| 460 |
+
):
|
| 461 |
+
raise ValueError(
|
| 462 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 463 |
+
" the same length as the number of controlnets"
|
| 464 |
+
)
|
| 465 |
+
else:
|
| 466 |
+
assert False
|
| 467 |
+
|
| 468 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 469 |
+
raise ValueError(
|
| 470 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 471 |
+
)
|
| 472 |
+
|
| 473 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 474 |
+
if len(control_guidance_start) != len(self.controlnet.nets):
|
| 475 |
+
raise ValueError(
|
| 476 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
| 477 |
+
)
|
| 478 |
+
|
| 479 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 480 |
+
if start >= end:
|
| 481 |
+
raise ValueError(
|
| 482 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 483 |
+
)
|
| 484 |
+
if start < 0.0:
|
| 485 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 486 |
+
if end > 1.0:
|
| 487 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 488 |
+
|
| 489 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
|
| 490 |
+
def prepare_control_image(
|
| 491 |
+
self,
|
| 492 |
+
image,
|
| 493 |
+
width,
|
| 494 |
+
height,
|
| 495 |
+
batch_size,
|
| 496 |
+
num_images_per_prompt,
|
| 497 |
+
device,
|
| 498 |
+
dtype,
|
| 499 |
+
do_classifier_free_guidance=False,
|
| 500 |
+
guess_mode=False,
|
| 501 |
+
):
|
| 502 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 503 |
+
image_batch_size = image.shape[0]
|
| 504 |
+
|
| 505 |
+
if image_batch_size == 1:
|
| 506 |
+
repeat_by = batch_size
|
| 507 |
+
else:
|
| 508 |
+
# image batch size is the same as prompt batch size
|
| 509 |
+
repeat_by = num_images_per_prompt
|
| 510 |
+
|
| 511 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 512 |
+
|
| 513 |
+
image = image.to(device=device, dtype=dtype)
|
| 514 |
+
|
| 515 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 516 |
+
image = torch.cat([image] * 2)
|
| 517 |
+
|
| 518 |
+
return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

        return timesteps, num_inference_steps - t_start
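
    # Illustrative note (not part of the original file): with num_inference_steps=50
    # and strength=0.8 (the default used by this pipeline's __call__), init_timestep
    # is 40 and t_start is 10, so the scheduler skips the first 10 timesteps and the
    # pipeline actually runs 40 denoising steps starting from a partially noised
    # version of the input frame.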
|
| 529 |
+
|
| 530 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents
|
| 531 |
+
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
|
| 532 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 533 |
+
raise ValueError(
|
| 534 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 535 |
+
)
|
| 536 |
+
|
| 537 |
+
image = image.to(device=device, dtype=dtype)
|
| 538 |
+
|
| 539 |
+
batch_size = batch_size * num_images_per_prompt
|
| 540 |
+
|
| 541 |
+
if image.shape[1] == 4:
|
| 542 |
+
init_latents = image
|
| 543 |
+
|
| 544 |
+
else:
|
| 545 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 546 |
+
raise ValueError(
|
| 547 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 548 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 549 |
+
)
|
| 550 |
+
|
| 551 |
+
elif isinstance(generator, list):
|
| 552 |
+
init_latents = [
|
| 553 |
+
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
|
| 554 |
+
]
|
| 555 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 556 |
+
else:
|
| 557 |
+
init_latents = self.vae.encode(image).latent_dist.sample(generator)
|
| 558 |
+
|
| 559 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 560 |
+
|
| 561 |
+
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
| 562 |
+
# expand init_latents for batch_size
|
| 563 |
+
deprecation_message = (
|
| 564 |
+
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
|
| 565 |
+
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
|
| 566 |
+
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
|
| 567 |
+
" your script to pass as many initial images as text prompts to suppress this warning."
|
| 568 |
+
)
|
| 569 |
+
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
|
| 570 |
+
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
| 571 |
+
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
|
| 572 |
+
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
| 573 |
+
raise ValueError(
|
| 574 |
+
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
| 575 |
+
)
|
| 576 |
+
else:
|
| 577 |
+
init_latents = torch.cat([init_latents], dim=0)
|
| 578 |
+
|
| 579 |
+
shape = init_latents.shape
|
| 580 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 581 |
+
|
| 582 |
+
# get latents
|
| 583 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 584 |
+
latents = init_latents
|
| 585 |
+
|
| 586 |
+
return latents
|
| 587 |
+
|
| 588 |
+
@torch.no_grad()
|
| 589 |
+
def __call__(
|
| 590 |
+
self,
|
| 591 |
+
prompt: Union[str, List[str]] = None,
|
| 592 |
+
frames: Union[List[np.ndarray], torch.FloatTensor] = None,
|
| 593 |
+
control_frames: Union[List[np.ndarray], torch.FloatTensor] = None,
|
| 594 |
+
strength: float = 0.8,
|
| 595 |
+
num_inference_steps: int = 50,
|
| 596 |
+
guidance_scale: float = 7.5,
|
| 597 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 598 |
+
eta: float = 0.0,
|
| 599 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 600 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 601 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 602 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 603 |
+
output_type: Optional[str] = "pil",
|
| 604 |
+
return_dict: bool = True,
|
| 605 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 606 |
+
callback_steps: int = 1,
|
| 607 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 608 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
|
| 609 |
+
guess_mode: bool = False,
|
| 610 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 611 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 612 |
+
warp_start: Union[float, List[float]] = 0.0,
|
| 613 |
+
warp_end: Union[float, List[float]] = 0.3,
|
| 614 |
+
mask_start: Union[float, List[float]] = 0.5,
|
| 615 |
+
mask_end: Union[float, List[float]] = 0.8,
|
| 616 |
+
smooth_boundary: bool = True,
|
| 617 |
+
mask_strength: Union[float, List[float]] = 0.5,
|
| 618 |
+
inner_strength: Union[float, List[float]] = 0.9,
|
| 619 |
+
):
|
| 620 |
+
r"""
|
| 621 |
+
Function invoked when calling the pipeline for generation.
|
| 622 |
+
|
| 623 |
+
Args:
|
| 624 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 625 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
| 626 |
+
instead.
|
| 627 |
+
frames (`List[np.ndarray]` or `torch.FloatTensor`): The input images to be used as the starting point for the image generation process.
|
| 628 |
+
control_frames (`List[np.ndarray]` or `torch.FloatTensor`): The ControlNet input images condition to provide guidance to the `unet` for generation.
|
| 629 |
+
strength ('float'): SDEdit strength.
|
| 630 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 631 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 632 |
+
expense of slower inference.
|
| 633 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 634 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 635 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 636 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 637 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 638 |
+
usually at the expense of lower image quality.
|
| 639 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 640 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 641 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 642 |
+
less than `1`).
|
| 643 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 644 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 645 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 646 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 647 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 648 |
+
to make generation deterministic.
|
| 649 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 650 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 651 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 652 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 653 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 654 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 655 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 656 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 657 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 658 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 659 |
+
argument.
|
| 660 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 661 |
+
The output format of the generated image. Choose between
|
| 662 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 663 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 664 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 665 |
+
plain tuple.
|
| 666 |
+
callback (`Callable`, *optional*):
|
| 667 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 668 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 669 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 670 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 671 |
+
called at every step.
|
| 672 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 673 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 674 |
+
`self.processor` in
|
| 675 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 676 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
|
| 677 |
+
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 678 |
+
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
| 679 |
+
corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
|
| 680 |
+
than for [`~StableDiffusionControlNetPipeline.__call__`].
|
| 681 |
+
guess_mode (`bool`, *optional*, defaults to `False`):
|
| 682 |
+
In this mode, the ControlNet encoder tries its best to recognize the content of the input image even if
|
| 683 |
+
you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
|
| 684 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 685 |
+
The percentage of total steps at which the controlnet starts applying.
|
| 686 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 687 |
+
The percentage of total steps at which the controlnet stops applying.
|
| 688 |
+
warp_start (`float`): Shape-aware fusion start timestep.
|
| 689 |
+
warp_end (`float`): Shape-aware fusion end timestep.
|
| 690 |
+
mask_start (`float`): Pixel-aware fusion start timestep.
|
| 691 |
+
mask_end (`float`): Pixel-aware fusion end timestep.
|
| 692 |
+
smooth_boundary (`bool`): Smooth fusion boundary. Set `True` to prevent artifacts at boundary.
|
| 693 |
+
mask_strength (`float`): Pixel-aware fusion strength.
|
| 694 |
+
inner_strength (`float`): Pixel-aware fusion detail level.
|
| 695 |
+
|
| 696 |
+
Examples:
|
| 697 |
+
|
| 698 |
+
Returns:
|
| 699 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 700 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 701 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 702 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 703 |
+
(nsfw) content, according to the `safety_checker`.
|
| 704 |
+
"""
|
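The `warp_*` and `mask_*` arguments are fractions of the denoising schedule rather than raw step indices. A minimal sketch with made-up values, mirroring the conversion done further down in this method:

```py
# Illustration only (hypothetical values): how the fractional fusion windows
# are turned into step boundaries inside this method.
num_inference_steps = 20
strength = 0.75
warp_start, warp_end = 0.0, 0.3
mask_start, mask_end = 0.5, 0.8

skip_t = int(num_inference_steps * (1 - strength))        # 5 steps skipped by SDEdit
warp_start_t = int(warp_start * num_inference_steps)      # 0
warp_end_t = int(warp_end * num_inference_steps)          # 6
mask_start_t = int(mask_start * num_inference_steps)      # 10
mask_end_t = int(mask_end * num_inference_steps)          # 16

# In the per-frame denoising loop, step i applies shape-aware fusion when
# warp_start_t <= i + skip_t <= warp_end_t, and pixel-aware fusion when
# mask_start_t <= i + skip_t <= mask_end_t.
```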
| 705 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 706 |
+
|
| 707 |
+
# align format for control guidance
|
| 708 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 709 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 710 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 711 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 712 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 713 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 714 |
+
control_guidance_start, control_guidance_end = (
|
| 715 |
+
mult * [control_guidance_start],
|
| 716 |
+
mult * [control_guidance_end],
|
| 717 |
+
)
|
| 718 |
+
|
| 719 |
+
# 1. Check inputs. Raise error if not correct
|
| 720 |
+
self.check_inputs(
|
| 721 |
+
prompt,
|
| 722 |
+
callback_steps,
|
| 723 |
+
negative_prompt,
|
| 724 |
+
prompt_embeds,
|
| 725 |
+
negative_prompt_embeds,
|
| 726 |
+
controlnet_conditioning_scale,
|
| 727 |
+
control_guidance_start,
|
| 728 |
+
control_guidance_end,
|
| 729 |
+
)
|
| 730 |
+
|
| 731 |
+
# 2. Define call parameters
|
| 732 |
+
# Currently we only support 1 prompt
|
| 733 |
+
if prompt is not None and isinstance(prompt, str):
|
| 734 |
+
batch_size = 1
|
| 735 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 736 |
+
assert False
|
| 737 |
+
else:
|
| 738 |
+
assert False
|
| 739 |
+
num_images_per_prompt = 1
|
| 740 |
+
|
| 741 |
+
device = self._execution_device
|
| 742 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 743 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 744 |
+
# corresponds to doing no classifier free guidance.
|
| 745 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 746 |
+
|
| 747 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 748 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 749 |
+
|
| 750 |
+
global_pool_conditions = (
|
| 751 |
+
controlnet.config.global_pool_conditions
|
| 752 |
+
if isinstance(controlnet, ControlNetModel)
|
| 753 |
+
else controlnet.nets[0].config.global_pool_conditions
|
| 754 |
+
)
|
| 755 |
+
guess_mode = guess_mode or global_pool_conditions
|
| 756 |
+
|
| 757 |
+
# 3. Encode input prompt
|
| 758 |
+
text_encoder_lora_scale = (
|
| 759 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 760 |
+
)
|
| 761 |
+
prompt_embeds = self._encode_prompt(
|
| 762 |
+
prompt,
|
| 763 |
+
device,
|
| 764 |
+
num_images_per_prompt,
|
| 765 |
+
do_classifier_free_guidance,
|
| 766 |
+
negative_prompt,
|
| 767 |
+
prompt_embeds=prompt_embeds,
|
| 768 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 769 |
+
lora_scale=text_encoder_lora_scale,
|
| 770 |
+
)
|
| 771 |
+
|
| 772 |
+
# 4. Process the first frame
|
| 773 |
+
height, width = None, None
|
| 774 |
+
output_frames = []
|
| 775 |
+
self.attn_state.reset()
|
| 776 |
+
|
| 777 |
+
# 4.1 prepare frames
|
| 778 |
+
image = self.image_processor.preprocess(frames[0]).to(dtype=torch.float32)
|
| 779 |
+
first_image = image[0] # C, H, W
|
| 780 |
+
|
| 781 |
+
# 4.2 Prepare controlnet_conditioning_image
|
| 782 |
+
# Currently we only support single control
|
| 783 |
+
if isinstance(controlnet, ControlNetModel):
|
| 784 |
+
control_image = self.prepare_control_image(
|
| 785 |
+
image=control_frames[0],
|
| 786 |
+
width=width,
|
| 787 |
+
height=height,
|
| 788 |
+
batch_size=batch_size,
|
| 789 |
+
num_images_per_prompt=1,
|
| 790 |
+
device=device,
|
| 791 |
+
dtype=controlnet.dtype,
|
| 792 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 793 |
+
guess_mode=guess_mode,
|
| 794 |
+
)
|
| 795 |
+
else:
|
| 796 |
+
assert False
|
| 797 |
+
|
| 798 |
+
# 4.3 Prepare timesteps
|
| 799 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 800 |
+
timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
| 801 |
+
latent_timestep = timesteps[:1].repeat(batch_size)
|
| 802 |
+
|
| 803 |
+
# 4.4 Prepare latent variables
|
| 804 |
+
latents = self.prepare_latents(
|
| 805 |
+
image,
|
| 806 |
+
latent_timestep,
|
| 807 |
+
batch_size,
|
| 808 |
+
num_images_per_prompt,
|
| 809 |
+
prompt_embeds.dtype,
|
| 810 |
+
device,
|
| 811 |
+
generator,
|
| 812 |
+
)
|
| 813 |
+
|
| 814 |
+
# 4.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 815 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 816 |
+
|
| 817 |
+
# 4.6 Create tensor stating which controlnets to keep
|
| 818 |
+
controlnet_keep = []
|
| 819 |
+
for i in range(len(timesteps)):
|
| 820 |
+
keeps = [
|
| 821 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 822 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 823 |
+
]
|
| 824 |
+
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
|
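The `controlnet_keep` schedule built above is a per-step 0/1 multiplier derived from `control_guidance_start` and `control_guidance_end`. The same arithmetic isolated as a small sketch (hypothetical step count):

```py
# Same arithmetic as above, isolated for illustration.
def controlnet_keep_schedule(num_steps, start=0.0, end=1.0):
    keeps = []
    for i in range(num_steps):
        # 1.0 while the step fraction lies inside [start, end], else 0.0
        keeps.append(1.0 - float(i / num_steps < start or (i + 1) / num_steps > end))
    return keeps

print(controlnet_keep_schedule(10, start=0.2, end=0.8))
# [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]
```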
| 825 |
+
|
| 826 |
+
first_x0_list = []
|
| 827 |
+
|
| 828 |
+
# 4.7 Denoising loop
|
| 829 |
+
num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
|
| 830 |
+
with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
|
| 831 |
+
for i, t in enumerate(timesteps):
|
| 832 |
+
self.attn_state.set_timestep(t.item())
|
| 833 |
+
|
| 834 |
+
# expand the latents if we are doing classifier free guidance
|
| 835 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 836 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 837 |
+
|
| 838 |
+
# controlnet(s) inference
|
| 839 |
+
if guess_mode and do_classifier_free_guidance:
|
| 840 |
+
# Infer ControlNet only for the conditional batch.
|
| 841 |
+
control_model_input = latents
|
| 842 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 843 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 844 |
+
else:
|
| 845 |
+
control_model_input = latent_model_input
|
| 846 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 847 |
+
|
| 848 |
+
if isinstance(controlnet_keep[i], list):
|
| 849 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 850 |
+
else:
|
| 851 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 852 |
+
if isinstance(controlnet_cond_scale, list):
|
| 853 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 854 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 855 |
+
|
| 856 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 857 |
+
control_model_input,
|
| 858 |
+
t,
|
| 859 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 860 |
+
controlnet_cond=control_image,
|
| 861 |
+
conditioning_scale=cond_scale,
|
| 862 |
+
guess_mode=guess_mode,
|
| 863 |
+
return_dict=False,
|
| 864 |
+
)
|
| 865 |
+
|
| 866 |
+
if guess_mode and do_classifier_free_guidance:
|
| 867 |
+
# Inferred ControlNet only for the conditional batch.
|
| 868 |
+
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
| 869 |
+
# add 0 to the unconditional batch to keep it unchanged.
|
| 870 |
+
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
| 871 |
+
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
|
| 872 |
+
|
| 873 |
+
# predict the noise residual
|
| 874 |
+
noise_pred = self.unet(
|
| 875 |
+
latent_model_input,
|
| 876 |
+
t,
|
| 877 |
+
encoder_hidden_states=prompt_embeds,
|
| 878 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 879 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 880 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 881 |
+
return_dict=False,
|
| 882 |
+
)[0]
|
| 883 |
+
|
| 884 |
+
# perform guidance
|
| 885 |
+
if do_classifier_free_guidance:
|
| 886 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 887 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 888 |
+
|
| 889 |
+
alpha_prod_t = self.scheduler.alphas_cumprod[t]
|
| 890 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 891 |
+
pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
|
| 892 |
+
first_x0 = pred_x0.detach()
|
| 893 |
+
first_x0_list.append(first_x0)
|
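For reference, the guidance and `pred_x0` lines above correspond to the usual classifier-free guidance combination and the ε-prediction estimate of the clean latent:

```latex
\hat{\epsilon} = \epsilon_\theta(x_t, \varnothing) + w\,\bigl(\epsilon_\theta(x_t, c) - \epsilon_\theta(x_t, \varnothing)\bigr),
\qquad
\hat{x}_0 = \frac{x_t - \sqrt{1 - \bar{\alpha}_t}\,\hat{\epsilon}}{\sqrt{\bar{\alpha}_t}}
```

Here `w` is `guidance_scale` and `\bar{\alpha}_t` is `self.scheduler.alphas_cumprod[t]`; the per-step `first_x0` estimates are cached in `first_x0_list` so later frames can warp them with optical flow.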
| 894 |
+
|
| 895 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 896 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 897 |
+
|
| 898 |
+
# call the callback, if provided
|
| 899 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 900 |
+
progress_bar.update()
|
| 901 |
+
if callback is not None and i % callback_steps == 0:
|
| 902 |
+
callback(i, t, latents)
|
| 903 |
+
|
| 904 |
+
if not output_type == "latent":
|
| 905 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 906 |
+
else:
|
| 907 |
+
image = latents
|
| 908 |
+
|
| 909 |
+
first_result = image
|
| 910 |
+
prev_result = image
|
| 911 |
+
do_denormalize = [True] * image.shape[0]
|
| 912 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 913 |
+
|
| 914 |
+
output_frames.append(image[0])
|
| 915 |
+
|
| 916 |
+
# 5. Process each frame
|
| 917 |
+
for idx in range(1, len(frames)):
|
| 918 |
+
image = frames[idx]
|
| 919 |
+
prev_image = frames[idx - 1]
|
| 920 |
+
control_image = control_frames[idx]
|
| 921 |
+
# 5.1 prepare frames
|
| 922 |
+
image = self.image_processor.preprocess(image).to(dtype=torch.float32)
|
| 923 |
+
prev_image = self.image_processor.preprocess(prev_image).to(dtype=torch.float32)
|
| 924 |
+
|
| 925 |
+
warped_0, bwd_occ_0, bwd_flow_0 = get_warped_and_mask(
|
| 926 |
+
self.flow_model, first_image, image[0], first_result, False, self.device
|
| 927 |
+
)
|
| 928 |
+
blend_mask_0 = blur(F.max_pool2d(bwd_occ_0, kernel_size=9, stride=1, padding=4))
|
| 929 |
+
blend_mask_0 = torch.clamp(blend_mask_0 + bwd_occ_0, 0, 1)
|
| 930 |
+
|
| 931 |
+
warped_pre, bwd_occ_pre, bwd_flow_pre = get_warped_and_mask(
|
| 932 |
+
self.flow_model, prev_image[0], image[0], prev_result, False, self.device
|
| 933 |
+
)
|
| 934 |
+
blend_mask_pre = blur(F.max_pool2d(bwd_occ_pre, kernel_size=9, stride=1, padding=4))
|
| 935 |
+
blend_mask_pre = torch.clamp(blend_mask_pre + bwd_occ_pre, 0, 1)
|
| 936 |
+
|
| 937 |
+
warp_mask = 1 - F.max_pool2d(blend_mask_0, kernel_size=8)
|
| 938 |
+
warp_flow = F.interpolate(bwd_flow_0 / 8.0, scale_factor=1.0 / 8, mode="bilinear")
|
| 939 |
+
|
| 940 |
+
# 5.2 Prepare controlnet_conditioning_image
|
| 941 |
+
# Currently we only support single control
|
| 942 |
+
if isinstance(controlnet, ControlNetModel):
|
| 943 |
+
control_image = self.prepare_control_image(
|
| 944 |
+
image=control_image,
|
| 945 |
+
width=width,
|
| 946 |
+
height=height,
|
| 947 |
+
batch_size=batch_size,
|
| 948 |
+
num_images_per_prompt=1,
|
| 949 |
+
device=device,
|
| 950 |
+
dtype=controlnet.dtype,
|
| 951 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 952 |
+
guess_mode=guess_mode,
|
| 953 |
+
)
|
| 954 |
+
else:
|
| 955 |
+
assert False
|
| 956 |
+
|
| 957 |
+
# 5.3 Prepare timesteps
|
| 958 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 959 |
+
timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
| 960 |
+
latent_timestep = timesteps[:1].repeat(batch_size)
|
| 961 |
+
|
| 962 |
+
skip_t = int(num_inference_steps * (1 - strength))
|
| 963 |
+
warp_start_t = int(warp_start * num_inference_steps)
|
| 964 |
+
warp_end_t = int(warp_end * num_inference_steps)
|
| 965 |
+
mask_start_t = int(mask_start * num_inference_steps)
|
| 966 |
+
mask_end_t = int(mask_end * num_inference_steps)
|
| 967 |
+
|
| 968 |
+
# 5.4 Prepare latent variables
|
| 969 |
+
init_latents = self.prepare_latents(
|
| 970 |
+
image,
|
| 971 |
+
latent_timestep,
|
| 972 |
+
batch_size,
|
| 973 |
+
num_images_per_prompt,
|
| 974 |
+
prompt_embeds.dtype,
|
| 975 |
+
device,
|
| 976 |
+
generator,
|
| 977 |
+
)
|
| 978 |
+
|
| 979 |
+
# 5.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 980 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 981 |
+
|
| 982 |
+
# 5.6 Create tensor stating which controlnets to keep
|
| 983 |
+
controlnet_keep = []
|
| 984 |
+
for i in range(len(timesteps)):
|
| 985 |
+
keeps = [
|
| 986 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 987 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 988 |
+
]
|
| 989 |
+
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
|
| 990 |
+
|
| 991 |
+
# 5.7 Denoising loop
|
| 992 |
+
num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
|
| 993 |
+
|
| 994 |
+
def denoising_loop(latents, mask=None, xtrg=None, noise_rescale=None):
|
| 995 |
+
dir_xt = 0
|
| 996 |
+
latents_dtype = latents.dtype
|
| 997 |
+
with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
|
| 998 |
+
for i, t in enumerate(timesteps):
|
| 999 |
+
self.attn_state.set_timestep(t.item())
|
| 1000 |
+
if i + skip_t >= mask_start_t and i + skip_t <= mask_end_t and xtrg is not None:
|
| 1001 |
+
rescale = torch.maximum(1.0 - mask, (1 - mask**2) ** 0.5 * inner_strength)
|
| 1002 |
+
if noise_rescale is not None:
|
| 1003 |
+
rescale = (1.0 - mask) * (1 - noise_rescale) + rescale * noise_rescale
|
| 1004 |
+
noise = randn_tensor(xtrg.shape, generator=generator, device=device, dtype=xtrg.dtype)
|
| 1005 |
+
latents_ref = self.scheduler.add_noise(xtrg, noise, t)
|
| 1006 |
+
latents = latents_ref * mask + (1.0 - mask) * (latents - dir_xt) + rescale * dir_xt
|
| 1007 |
+
latents = latents.to(latents_dtype)
|
| 1008 |
+
|
| 1009 |
+
# expand the latents if we are doing classifier free guidance
|
| 1010 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1011 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1012 |
+
|
| 1013 |
+
# controlnet(s) inference
|
| 1014 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1015 |
+
# Infer ControlNet only for the conditional batch.
|
| 1016 |
+
control_model_input = latents
|
| 1017 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1018 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1019 |
+
else:
|
| 1020 |
+
control_model_input = latent_model_input
|
| 1021 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1022 |
+
|
| 1023 |
+
if isinstance(controlnet_keep[i], list):
|
| 1024 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1025 |
+
else:
|
| 1026 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1027 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1028 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1029 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1030 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1031 |
+
control_model_input,
|
| 1032 |
+
t,
|
| 1033 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1034 |
+
controlnet_cond=control_image,
|
| 1035 |
+
conditioning_scale=cond_scale,
|
| 1036 |
+
guess_mode=guess_mode,
|
| 1037 |
+
return_dict=False,
|
| 1038 |
+
)
|
| 1039 |
+
|
| 1040 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1041 |
+
# Inferred ControlNet only for the conditional batch.
|
| 1042 |
+
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
| 1043 |
+
# add 0 to the unconditional batch to keep it unchanged.
|
| 1044 |
+
down_block_res_samples = [
|
| 1045 |
+
torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples
|
| 1046 |
+
]
|
| 1047 |
+
mid_block_res_sample = torch.cat(
|
| 1048 |
+
[torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
|
| 1049 |
+
)
|
| 1050 |
+
|
| 1051 |
+
# predict the noise residual
|
| 1052 |
+
noise_pred = self.unet(
|
| 1053 |
+
latent_model_input,
|
| 1054 |
+
t,
|
| 1055 |
+
encoder_hidden_states=prompt_embeds,
|
| 1056 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1057 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1058 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1059 |
+
return_dict=False,
|
| 1060 |
+
)[0]
|
| 1061 |
+
|
| 1062 |
+
# perform guidance
|
| 1063 |
+
if do_classifier_free_guidance:
|
| 1064 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1065 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1066 |
+
|
| 1067 |
+
# Get pred_x0 from scheduler
|
| 1068 |
+
alpha_prod_t = self.scheduler.alphas_cumprod[t]
|
| 1069 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 1070 |
+
pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
|
| 1071 |
+
|
| 1072 |
+
if i + skip_t >= warp_start_t and i + skip_t <= warp_end_t:
|
| 1073 |
+
# warp x_0
|
| 1074 |
+
pred_x0 = (
|
| 1075 |
+
flow_warp(first_x0_list[i], warp_flow, mode="nearest") * warp_mask
|
| 1076 |
+
+ (1 - warp_mask) * pred_x0
|
| 1077 |
+
)
|
| 1078 |
+
|
| 1079 |
+
# get x_t from x_0
|
| 1080 |
+
latents = self.scheduler.add_noise(pred_x0, noise_pred, t).to(latents_dtype)
|
| 1081 |
+
|
| 1082 |
+
prev_t = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
|
| 1083 |
+
if i == len(timesteps) - 1:
|
| 1084 |
+
alpha_t_prev = 1.0
|
| 1085 |
+
else:
|
| 1086 |
+
alpha_t_prev = self.scheduler.alphas_cumprod[prev_t]
|
| 1087 |
+
|
| 1088 |
+
dir_xt = (1.0 - alpha_t_prev) ** 0.5 * noise_pred
|
| 1089 |
+
|
| 1090 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1091 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[
|
| 1092 |
+
0
|
| 1093 |
+
]
|
| 1094 |
+
|
| 1095 |
+
# call the callback, if provided
|
| 1096 |
+
if i == len(timesteps) - 1 or (
|
| 1097 |
+
(i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
|
| 1098 |
+
):
|
| 1099 |
+
progress_bar.update()
|
| 1100 |
+
if callback is not None and i % callback_steps == 0:
|
| 1101 |
+
callback(i, t, latents)
|
| 1102 |
+
|
| 1103 |
+
return latents
|
| 1104 |
+
|
| 1105 |
+
if mask_start_t <= mask_end_t:
|
| 1106 |
+
self.attn_state.to_load()
|
| 1107 |
+
else:
|
| 1108 |
+
self.attn_state.to_load_and_store_prev()
|
| 1109 |
+
latents = denoising_loop(init_latents)
|
| 1110 |
+
|
| 1111 |
+
if mask_start_t <= mask_end_t:
|
| 1112 |
+
direct_result = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1113 |
+
|
| 1114 |
+
blend_results = (1 - blend_mask_pre) * warped_pre + blend_mask_pre * direct_result
|
| 1115 |
+
blend_results = (1 - blend_mask_0) * warped_0 + blend_mask_0 * blend_results
|
| 1116 |
+
|
| 1117 |
+
bwd_occ = 1 - torch.clamp(1 - bwd_occ_pre + 1 - bwd_occ_0, 0, 1)
|
| 1118 |
+
blend_mask = blur(F.max_pool2d(bwd_occ, kernel_size=9, stride=1, padding=4))
|
| 1119 |
+
blend_mask = 1 - torch.clamp(blend_mask + bwd_occ, 0, 1)
|
| 1120 |
+
|
| 1121 |
+
blend_results = blend_results.to(latents.dtype)
|
| 1122 |
+
xtrg = self.vae.encode(blend_results).latent_dist.sample(generator)
|
| 1123 |
+
xtrg = self.vae.config.scaling_factor * xtrg
|
| 1124 |
+
blend_results_rec = self.vae.decode(xtrg / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1125 |
+
xtrg_rec = self.vae.encode(blend_results_rec).latent_dist.sample(generator)
|
| 1126 |
+
xtrg_rec = self.vae.config.scaling_factor * xtrg_rec
|
| 1127 |
+
xtrg_ = xtrg + (xtrg - xtrg_rec)
|
| 1128 |
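The `xtrg_` line above reads as a compensation for the VAE round-trip error: the blended result is encoded, decoded, re-encoded, and the encode/decode discrepancy is added back, which appears to be the fidelity-oriented encoding trick used by Rerender-A-Video:

```latex
z' = z + \bigl(z - \mathrm{Enc}(\mathrm{Dec}(z))\bigr)
```

where Enc and Dec are the VAE encode and decode calls in the surrounding lines.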
+
blend_results_rec_new = self.vae.decode(xtrg_ / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1129 |
+
tmp = (abs(blend_results_rec_new - blend_results).mean(dim=1, keepdims=True) > 0.25).float()
|
| 1130 |
+
|
| 1131 |
+
mask_x = F.max_pool2d(
|
| 1132 |
+
(F.interpolate(tmp, scale_factor=1 / 8.0, mode="bilinear") > 0).float(),
|
| 1133 |
+
kernel_size=3,
|
| 1134 |
+
stride=1,
|
| 1135 |
+
padding=1,
|
| 1136 |
+
)
|
| 1137 |
+
|
| 1138 |
+
mask = 1 - F.max_pool2d(1 - blend_mask, kernel_size=8) # * (1-mask_x)
|
| 1139 |
+
|
| 1140 |
+
if smooth_boundary:
|
| 1141 |
+
noise_rescale = find_flat_region(mask)
|
| 1142 |
+
else:
|
| 1143 |
+
noise_rescale = torch.ones_like(mask)
|
| 1144 |
+
|
| 1145 |
+
xtrg = (xtrg + (1 - mask_x) * (xtrg - xtrg_rec)) * mask
|
| 1146 |
+
xtrg = xtrg.to(latents.dtype)
|
| 1147 |
+
|
| 1148 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1149 |
+
timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
| 1150 |
+
|
| 1151 |
+
self.attn_state.to_load_and_store_prev()
|
| 1152 |
+
latents = denoising_loop(init_latents, mask * mask_strength, xtrg, noise_rescale)
|
| 1153 |
+
|
| 1154 |
+
if not output_type == "latent":
|
| 1155 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1156 |
+
else:
|
| 1157 |
+
image = latents
|
| 1158 |
+
|
| 1159 |
+
prev_result = image
|
| 1160 |
+
|
| 1161 |
+
do_denormalize = [True] * image.shape[0]
|
| 1162 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 1163 |
+
|
| 1164 |
+
output_frames.append(image[0])
|
| 1165 |
+
|
| 1166 |
+
# Offload last model to CPU
|
| 1167 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1168 |
+
self.final_offload_hook.offload()
|
| 1169 |
+
|
| 1170 |
+
if not return_dict:
|
| 1171 |
+
return output_frames
|
| 1172 |
+
|
| 1173 |
+
return TextToVideoSDPipelineOutput(frames=output_frames)
|
| 1174 |
+
|
| 1175 |
+
|
| 1176 |
+
class InputPadder:
|
| 1177 |
+
"""Pads images such that dimensions are divisible by 8"""
|
| 1178 |
+
|
| 1179 |
+
def __init__(self, dims, mode="sintel", padding_factor=8):
|
| 1180 |
+
self.ht, self.wd = dims[-2:]
|
| 1181 |
+
pad_ht = (((self.ht // padding_factor) + 1) * padding_factor - self.ht) % padding_factor
|
| 1182 |
+
pad_wd = (((self.wd // padding_factor) + 1) * padding_factor - self.wd) % padding_factor
|
| 1183 |
+
if mode == "sintel":
|
| 1184 |
+
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2, pad_ht - pad_ht // 2]
|
| 1185 |
+
else:
|
| 1186 |
+
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
|
| 1187 |
+
|
| 1188 |
+
def pad(self, *inputs):
|
| 1189 |
+
return [F.pad(x, self._pad, mode="replicate") for x in inputs]
|
| 1190 |
+
|
| 1191 |
+
def unpad(self, x):
|
| 1192 |
+
ht, wd = x.shape[-2:]
|
| 1193 |
+
c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
|
| 1194 |
+
return x[..., c[0] : c[1], c[2] : c[3]]
|
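A minimal usage sketch for `InputPadder` with hypothetical tensor sizes, showing the pad/unpad round trip:

```py
import torch
import torch.nn.functional as F  # InputPadder.pad relies on F.pad

x = torch.randn(1, 3, 250, 437)        # height/width not divisible by 8
padder = InputPadder(x.shape)          # defaults: mode="sintel", padding_factor=8
(x_pad,) = padder.pad(x)               # -> shape (1, 3, 256, 440)
assert x_pad.shape[-2] % 8 == 0 and x_pad.shape[-1] % 8 == 0
x_back = padder.unpad(x_pad)           # crops back to (1, 3, 250, 437)
assert x_back.shape == x.shape
```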
v0.27.0/run_onnx_controlnet.py
ADDED
|
@@ -0,0 +1,911 @@
|
| 1 |
+
import argparse
|
| 2 |
+
import inspect
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
import warnings
|
| 6 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import PIL.Image
|
| 10 |
+
import torch
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from transformers import CLIPTokenizer
|
| 13 |
+
|
| 14 |
+
from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
|
| 15 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 16 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 17 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 18 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 19 |
+
from diffusers.utils import (
|
| 20 |
+
deprecate,
|
| 21 |
+
logging,
|
| 22 |
+
replace_example_docstring,
|
| 23 |
+
)
|
| 24 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
EXAMPLE_DOC_STRING = """
|
| 31 |
+
Examples:
|
| 32 |
+
```py
|
| 33 |
+
>>> # !pip install opencv-python transformers accelerate
|
| 34 |
+
>>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
|
| 35 |
+
>>> from diffusers.utils import load_image
|
| 36 |
+
>>> import numpy as np
|
| 37 |
+
>>> import torch
|
| 38 |
+
|
| 39 |
+
>>> import cv2
|
| 40 |
+
>>> from PIL import Image
|
| 41 |
+
|
| 42 |
+
>>> # download an image
|
| 43 |
+
>>> image = load_image(
|
| 44 |
+
... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
|
| 45 |
+
... )
|
| 46 |
+
>>> np_image = np.array(image)
|
| 47 |
+
|
| 48 |
+
>>> # get canny image
|
| 49 |
+
>>> np_image = cv2.Canny(np_image, 100, 200)
|
| 50 |
+
>>> np_image = np_image[:, :, None]
|
| 51 |
+
>>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
|
| 52 |
+
>>> canny_image = Image.fromarray(np_image)
|
| 53 |
+
|
| 54 |
+
>>> # load control net and stable diffusion v1-5
|
| 55 |
+
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
|
| 56 |
+
>>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
|
| 57 |
+
... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
|
| 58 |
+
... )
|
| 59 |
+
|
| 60 |
+
>>> # speed up diffusion process with faster scheduler and memory optimization
|
| 61 |
+
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
| 62 |
+
>>> pipe.enable_model_cpu_offload()
|
| 63 |
+
|
| 64 |
+
>>> # generate image
|
| 65 |
+
>>> generator = torch.manual_seed(0)
|
| 66 |
+
>>> image = pipe(
|
| 67 |
+
... "futuristic-looking woman",
|
| 68 |
+
... num_inference_steps=20,
|
| 69 |
+
... generator=generator,
|
| 70 |
+
... image=image,
|
| 71 |
+
... control_image=canny_image,
|
| 72 |
+
... ).images[0]
|
| 73 |
+
```
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def prepare_image(image):
|
| 78 |
+
if isinstance(image, torch.Tensor):
|
| 79 |
+
# Batch single image
|
| 80 |
+
if image.ndim == 3:
|
| 81 |
+
image = image.unsqueeze(0)
|
| 82 |
+
|
| 83 |
+
image = image.to(dtype=torch.float32)
|
| 84 |
+
else:
|
| 85 |
+
# preprocess image
|
| 86 |
+
if isinstance(image, (PIL.Image.Image, np.ndarray)):
|
| 87 |
+
image = [image]
|
| 88 |
+
|
| 89 |
+
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
|
| 90 |
+
image = [np.array(i.convert("RGB"))[None, :] for i in image]
|
| 91 |
+
image = np.concatenate(image, axis=0)
|
| 92 |
+
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
|
| 93 |
+
image = np.concatenate([i[None, :] for i in image], axis=0)
|
| 94 |
+
|
| 95 |
+
image = image.transpose(0, 3, 1, 2)
|
| 96 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 97 |
+
|
| 98 |
+
return image
|
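`prepare_image` maps 8-bit pixel values into the [-1, 1] range expected by the VAE encoder (0 maps to -1.0, 255 to 1.0). A quick check with a dummy image:

```py
import PIL.Image

img = PIL.Image.new("RGB", (8, 8), color=(255, 255, 255))  # dummy all-white image
t = prepare_image(img)                  # torch.FloatTensor of shape (1, 3, 8, 8)
print(t.min().item(), t.max().item())   # 1.0 1.0, since 255 / 127.5 - 1.0 == 1.0
```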
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
|
| 102 |
+
vae_encoder: OnnxRuntimeModel
|
| 103 |
+
vae_decoder: OnnxRuntimeModel
|
| 104 |
+
text_encoder: OnnxRuntimeModel
|
| 105 |
+
tokenizer: CLIPTokenizer
|
| 106 |
+
unet: OnnxRuntimeModel
|
| 107 |
+
scheduler: KarrasDiffusionSchedulers
|
| 108 |
+
|
| 109 |
+
def __init__(
|
| 110 |
+
self,
|
| 111 |
+
vae_encoder: OnnxRuntimeModel,
|
| 112 |
+
vae_decoder: OnnxRuntimeModel,
|
| 113 |
+
text_encoder: OnnxRuntimeModel,
|
| 114 |
+
tokenizer: CLIPTokenizer,
|
| 115 |
+
unet: OnnxRuntimeModel,
|
| 116 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 117 |
+
):
|
| 118 |
+
super().__init__()
|
| 119 |
+
|
| 120 |
+
self.register_modules(
|
| 121 |
+
vae_encoder=vae_encoder,
|
| 122 |
+
vae_decoder=vae_decoder,
|
| 123 |
+
text_encoder=text_encoder,
|
| 124 |
+
tokenizer=tokenizer,
|
| 125 |
+
unet=unet,
|
| 126 |
+
scheduler=scheduler,
|
| 127 |
+
)
|
| 128 |
+
self.vae_scale_factor = 2 ** (4 - 1)
|
| 129 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 130 |
+
self.control_image_processor = VaeImageProcessor(
|
| 131 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
def _encode_prompt(
|
| 135 |
+
self,
|
| 136 |
+
prompt: Union[str, List[str]],
|
| 137 |
+
num_images_per_prompt: Optional[int],
|
| 138 |
+
do_classifier_free_guidance: bool,
|
| 139 |
+
negative_prompt: Optional[str],
|
| 140 |
+
prompt_embeds: Optional[np.ndarray] = None,
|
| 141 |
+
negative_prompt_embeds: Optional[np.ndarray] = None,
|
| 142 |
+
):
|
| 143 |
+
r"""
|
| 144 |
+
Encodes the prompt into text encoder hidden states.
|
| 145 |
+
|
| 146 |
+
Args:
|
| 147 |
+
prompt (`str` or `List[str]`):
|
| 148 |
+
prompt to be encoded
|
| 149 |
+
num_images_per_prompt (`int`):
|
| 150 |
+
number of images that should be generated per prompt
|
| 151 |
+
do_classifier_free_guidance (`bool`):
|
| 152 |
+
whether to use classifier free guidance or not
|
| 153 |
+
negative_prompt (`str` or `List[str]`):
|
| 154 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 155 |
+
if `guidance_scale` is less than `1`).
|
| 156 |
+
prompt_embeds (`np.ndarray`, *optional*):
|
| 157 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 158 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 159 |
+
negative_prompt_embeds (`np.ndarray`, *optional*):
|
| 160 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 161 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 162 |
+
argument.
|
| 163 |
+
"""
|
| 164 |
+
if prompt is not None and isinstance(prompt, str):
|
| 165 |
+
batch_size = 1
|
| 166 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 167 |
+
batch_size = len(prompt)
|
| 168 |
+
else:
|
| 169 |
+
batch_size = prompt_embeds.shape[0]
|
| 170 |
+
|
| 171 |
+
if prompt_embeds is None:
|
| 172 |
+
# get prompt text embeddings
|
| 173 |
+
text_inputs = self.tokenizer(
|
| 174 |
+
prompt,
|
| 175 |
+
padding="max_length",
|
| 176 |
+
max_length=self.tokenizer.model_max_length,
|
| 177 |
+
truncation=True,
|
| 178 |
+
return_tensors="np",
|
| 179 |
+
)
|
| 180 |
+
text_input_ids = text_inputs.input_ids
|
| 181 |
+
untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
|
| 182 |
+
|
| 183 |
+
if not np.array_equal(text_input_ids, untruncated_ids):
|
| 184 |
+
removed_text = self.tokenizer.batch_decode(
|
| 185 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 186 |
+
)
|
| 187 |
+
logger.warning(
|
| 188 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 189 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
|
| 193 |
+
|
| 194 |
+
prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
|
| 195 |
+
|
| 196 |
+
# get unconditional embeddings for classifier free guidance
|
| 197 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 198 |
+
uncond_tokens: List[str]
|
| 199 |
+
if negative_prompt is None:
|
| 200 |
+
uncond_tokens = [""] * batch_size
|
| 201 |
+
elif type(prompt) is not type(negative_prompt):
|
| 202 |
+
raise TypeError(
|
| 203 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 204 |
+
f" {type(prompt)}."
|
| 205 |
+
)
|
| 206 |
+
elif isinstance(negative_prompt, str):
|
| 207 |
+
uncond_tokens = [negative_prompt] * batch_size
|
| 208 |
+
elif batch_size != len(negative_prompt):
|
| 209 |
+
raise ValueError(
|
| 210 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 211 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 212 |
+
" the batch size of `prompt`."
|
| 213 |
+
)
|
| 214 |
+
else:
|
| 215 |
+
uncond_tokens = negative_prompt
|
| 216 |
+
|
| 217 |
+
max_length = prompt_embeds.shape[1]
|
| 218 |
+
uncond_input = self.tokenizer(
|
| 219 |
+
uncond_tokens,
|
| 220 |
+
padding="max_length",
|
| 221 |
+
max_length=max_length,
|
| 222 |
+
truncation=True,
|
| 223 |
+
return_tensors="np",
|
| 224 |
+
)
|
| 225 |
+
negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
|
| 226 |
+
|
| 227 |
+
if do_classifier_free_guidance:
|
| 228 |
+
negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
|
| 229 |
+
|
| 230 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 231 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 232 |
+
# to avoid doing two forward passes
|
| 233 |
+
prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
|
| 234 |
+
|
| 235 |
+
return prompt_embeds
|
| 236 |
+
|
| 237 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
| 238 |
+
def decode_latents(self, latents):
|
| 239 |
+
warnings.warn(
|
| 240 |
+
"The decode_latents method is deprecated and will be removed in a future version. Please"
|
| 241 |
+
" use VaeImageProcessor instead",
|
| 242 |
+
FutureWarning,
|
| 243 |
+
)
|
| 244 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 245 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 246 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 247 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 248 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 249 |
+
return image
|
| 250 |
+
|
| 251 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 252 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 253 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 254 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 255 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 256 |
+
# and should be between [0, 1]
|
| 257 |
+
|
| 258 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 259 |
+
extra_step_kwargs = {}
|
| 260 |
+
if accepts_eta:
|
| 261 |
+
extra_step_kwargs["eta"] = eta
|
| 262 |
+
|
| 263 |
+
# check if the scheduler accepts generator
|
| 264 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 265 |
+
if accepts_generator:
|
| 266 |
+
extra_step_kwargs["generator"] = generator
|
| 267 |
+
return extra_step_kwargs
|
| 268 |
+
|
| 269 |
+
def check_inputs(
|
| 270 |
+
self,
|
| 271 |
+
num_controlnet,
|
| 272 |
+
prompt,
|
| 273 |
+
image,
|
| 274 |
+
callback_steps,
|
| 275 |
+
negative_prompt=None,
|
| 276 |
+
prompt_embeds=None,
|
| 277 |
+
negative_prompt_embeds=None,
|
| 278 |
+
controlnet_conditioning_scale=1.0,
|
| 279 |
+
control_guidance_start=0.0,
|
| 280 |
+
control_guidance_end=1.0,
|
| 281 |
+
):
|
| 282 |
+
if (callback_steps is None) or (
|
| 283 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 284 |
+
):
|
| 285 |
+
raise ValueError(
|
| 286 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 287 |
+
f" {type(callback_steps)}."
|
| 288 |
+
)
|
| 289 |
+
|
| 290 |
+
if prompt is not None and prompt_embeds is not None:
|
| 291 |
+
raise ValueError(
|
| 292 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 293 |
+
" only forward one of the two."
|
| 294 |
+
)
|
| 295 |
+
elif prompt is None and prompt_embeds is None:
|
| 296 |
+
raise ValueError(
|
| 297 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 298 |
+
)
|
| 299 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 300 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 301 |
+
|
| 302 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 303 |
+
raise ValueError(
|
| 304 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 305 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 309 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 310 |
+
raise ValueError(
|
| 311 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 312 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 313 |
+
f" {negative_prompt_embeds.shape}."
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
# Check `image`
|
| 317 |
+
if num_controlnet == 1:
|
| 318 |
+
self.check_image(image, prompt, prompt_embeds)
|
| 319 |
+
elif num_controlnet > 1:
|
| 320 |
+
if not isinstance(image, list):
|
| 321 |
+
raise TypeError("For multiple controlnets: `image` must be type `list`")
|
| 322 |
+
|
| 323 |
+
# When `image` is a nested list:
|
| 324 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 325 |
+
elif any(isinstance(i, list) for i in image):
|
| 326 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 327 |
+
elif len(image) != num_controlnet:
|
| 328 |
+
raise ValueError(
|
| 329 |
+
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
|
| 330 |
+
)
|
| 331 |
+
|
| 332 |
+
for image_ in image:
|
| 333 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 334 |
+
else:
|
| 335 |
+
assert False
|
| 336 |
+
|
| 337 |
+
# Check `controlnet_conditioning_scale`
|
| 338 |
+
if num_controlnet == 1:
|
| 339 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 340 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 341 |
+
elif num_controlnet > 1:
|
| 342 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 343 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 344 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 345 |
+
elif (
|
| 346 |
+
isinstance(controlnet_conditioning_scale, list)
|
| 347 |
+
and len(controlnet_conditioning_scale) != num_controlnet
|
| 348 |
+
):
|
| 349 |
+
raise ValueError(
|
| 350 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 351 |
+
" the same length as the number of controlnets"
|
| 352 |
+
)
|
| 353 |
+
else:
|
| 354 |
+
assert False
|
| 355 |
+
|
| 356 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 357 |
+
raise ValueError(
|
| 358 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
if num_controlnet > 1:
|
| 362 |
+
if len(control_guidance_start) != num_controlnet:
|
| 363 |
+
raise ValueError(
|
| 364 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
|
| 365 |
+
)
|
| 366 |
+
|
| 367 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 368 |
+
if start >= end:
|
| 369 |
+
raise ValueError(
|
| 370 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 371 |
+
)
|
| 372 |
+
if start < 0.0:
|
| 373 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 374 |
+
if end > 1.0:
|
| 375 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 376 |
+
|
| 377 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
|
| 378 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 379 |
+
image_is_pil = isinstance(image, PIL.Image.Image)
|
| 380 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 381 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 382 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
| 383 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 384 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 385 |
+
|
| 386 |
+
if (
|
| 387 |
+
not image_is_pil
|
| 388 |
+
and not image_is_tensor
|
| 389 |
+
and not image_is_np
|
| 390 |
+
and not image_is_pil_list
|
| 391 |
+
and not image_is_tensor_list
|
| 392 |
+
and not image_is_np_list
|
| 393 |
+
):
|
| 394 |
+
raise TypeError(
|
| 395 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
if image_is_pil:
|
| 399 |
+
image_batch_size = 1
|
| 400 |
+
else:
|
| 401 |
+
image_batch_size = len(image)
|
| 402 |
+
|
| 403 |
+
if prompt is not None and isinstance(prompt, str):
|
| 404 |
+
prompt_batch_size = 1
|
| 405 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 406 |
+
prompt_batch_size = len(prompt)
|
| 407 |
+
elif prompt_embeds is not None:
|
| 408 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 409 |
+
|
| 410 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 411 |
+
raise ValueError(
|
| 412 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
|
| 416 |
+
def prepare_control_image(
|
| 417 |
+
self,
|
| 418 |
+
image,
|
| 419 |
+
width,
|
| 420 |
+
height,
|
| 421 |
+
batch_size,
|
| 422 |
+
num_images_per_prompt,
|
| 423 |
+
device,
|
| 424 |
+
dtype,
|
| 425 |
+
do_classifier_free_guidance=False,
|
| 426 |
+
guess_mode=False,
|
| 427 |
+
):
|
| 428 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 429 |
+
image_batch_size = image.shape[0]
|
| 430 |
+
|
| 431 |
+
if image_batch_size == 1:
|
| 432 |
+
repeat_by = batch_size
|
| 433 |
+
else:
|
| 434 |
+
# image batch size is the same as prompt batch size
|
| 435 |
+
repeat_by = num_images_per_prompt
|
| 436 |
+
|
| 437 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 438 |
+
|
| 439 |
+
image = image.to(device=device, dtype=dtype)
|
| 440 |
+
|
| 441 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 442 |
+
image = torch.cat([image] * 2)
|
| 443 |
+
|
| 444 |
+
return image
|
| 445 |
+
|
| 446 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
|
| 447 |
+
def get_timesteps(self, num_inference_steps, strength, device):
|
| 448 |
+
# get the original timestep using init_timestep
|
| 449 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 450 |
+
|
| 451 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 452 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 453 |
+
|
| 454 |
+
return timesteps, num_inference_steps - t_start
|
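`get_timesteps` is where `strength` takes effect: with 50 scheduled steps and `strength=0.8`, the first 10 steps are skipped and 40 denoising steps run. The same arithmetic in isolation:

```py
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
print(t_start, num_inference_steps - t_start)  # 10 steps skipped, 40 steps run
```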
| 455 |
+
|
| 456 |
+
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
|
| 457 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 458 |
+
raise ValueError(
|
| 459 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 460 |
+
)
|
| 461 |
+
|
| 462 |
+
image = image.to(device=device, dtype=dtype)
|
| 463 |
+
|
| 464 |
+
batch_size = batch_size * num_images_per_prompt
|
| 465 |
+
|
| 466 |
+
if image.shape[1] == 4:
|
| 467 |
+
init_latents = image
|
| 468 |
+
|
| 469 |
+
else:
|
| 470 |
+
_image = image.cpu().detach().numpy()
|
| 471 |
+
init_latents = self.vae_encoder(sample=_image)[0]
|
| 472 |
+
init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
|
| 473 |
+
init_latents = 0.18215 * init_latents
|
| 474 |
+
|
| 475 |
+
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
| 476 |
+
# expand init_latents for batch_size
|
| 477 |
+
deprecation_message = (
|
| 478 |
+
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
|
| 479 |
+
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
|
| 480 |
+
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
|
| 481 |
+
" your script to pass as many initial images as text prompts to suppress this warning."
|
| 482 |
+
)
|
| 483 |
+
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
|
| 484 |
+
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
| 485 |
+
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
|
| 486 |
+
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
| 487 |
+
raise ValueError(
|
| 488 |
+
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
| 489 |
+
)
|
| 490 |
+
else:
|
| 491 |
+
init_latents = torch.cat([init_latents], dim=0)
|
| 492 |
+
|
| 493 |
+
shape = init_latents.shape
|
| 494 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 495 |
+
|
| 496 |
+
# get latents
|
| 497 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 498 |
+
latents = init_latents
|
| 499 |
+
|
| 500 |
+
return latents
|
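The hard-coded `0.18215` is the Stable Diffusion v1 VAE scaling factor, applied after encoding and undone before decoding (cf. the `latents / self.vae.config.scaling_factor` used before decoding elsewhere in these pipelines):

```latex
z = 0.18215 \cdot \mathrm{Enc}(x), \qquad \hat{x} = \mathrm{Dec}(z / 0.18215)
```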
| 501 |
+
|
| 502 |
+
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        num_controlnet: int,
        fp16: bool = True,
        prompt: Union[str, List[str]] = None,
        image: Union[
            torch.FloatTensor,
            PIL.Image.Image,
            np.ndarray,
            List[torch.FloatTensor],
            List[PIL.Image.Image],
            List[np.ndarray],
        ] = None,
        control_image: Union[
            torch.FloatTensor,
            PIL.Image.Image,
            np.ndarray,
            List[torch.FloatTensor],
            List[PIL.Image.Image],
            List[np.ndarray],
        ] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        strength: float = 0.8,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
        guess_mode: bool = False,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,
    ):
r"""
|
| 548 |
+
Function invoked when calling the pipeline for generation.
|
| 549 |
+
|
| 550 |
+
Args:
|
| 551 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 552 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
| 553 |
+
instead.
|
| 554 |
+
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 555 |
+
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 556 |
+
The initial image will be used as the starting point for the image generation process. Can also accept
|
| 557 |
+
image latents as `image`, if passing latents directly, it will not be encoded again.
|
| 558 |
+
control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 559 |
+
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 560 |
+
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
| 561 |
+
the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
|
| 562 |
+
also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
|
| 563 |
+
height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
|
| 564 |
+
specified in init, images must be passed as a list such that each element of the list can be correctly
|
| 565 |
+
batched for input to a single controlnet.
|
| 566 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 567 |
+
The height in pixels of the generated image.
|
| 568 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 569 |
+
The width in pixels of the generated image.
|
| 570 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 571 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 572 |
+
expense of slower inference.
|
| 573 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 574 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 575 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 576 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 577 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 578 |
+
usually at the expense of lower image quality.
|
| 579 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 580 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 581 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 582 |
+
less than `1`).
|
| 583 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 584 |
+
The number of images to generate per prompt.
|
| 585 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 586 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 587 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 588 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 589 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 590 |
+
to make generation deterministic.
|
| 591 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 592 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 593 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 594 |
+
tensor will ge generated by sampling using the supplied random `generator`.
|
| 595 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 596 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 597 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 598 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 599 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 600 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 601 |
+
argument.
|
| 602 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 603 |
+
The output format of the generate image. Choose between
|
| 604 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 605 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 606 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 607 |
+
plain tuple.
|
| 608 |
+
callback (`Callable`, *optional*):
|
| 609 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 610 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 611 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 612 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 613 |
+
called at every step.
|
| 614 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 615 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 616 |
+
`self.processor` in
|
| 617 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 618 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 619 |
+
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 620 |
+
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
| 621 |
+
corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
|
| 622 |
+
than for [`~StableDiffusionControlNetPipeline.__call__`].
|
| 623 |
+
guess_mode (`bool`, *optional*, defaults to `False`):
|
| 624 |
+
In this mode, the ControlNet encoder will try best to recognize the content of the input image even if
|
| 625 |
+
you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
|
| 626 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 627 |
+
The percentage of total steps at which the controlnet starts applying.
|
| 628 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 629 |
+
The percentage of total steps at which the controlnet stops applying.
|
| 630 |
+
|
| 631 |
+
Examples:
|
| 632 |
+
|
| 633 |
+
Returns:
|
| 634 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 635 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 636 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 637 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 638 |
+
(nsfw) content, according to the `safety_checker`.
|
| 639 |
+
"""
|
| 640 |
+
        if fp16:
            torch_dtype = torch.float16
            np_dtype = np.float16
        else:
            torch_dtype = torch.float32
            np_dtype = np.float32

        # align format for control guidance
        if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
            control_guidance_start = len(control_guidance_end) * [control_guidance_start]
        elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
            control_guidance_end = len(control_guidance_start) * [control_guidance_end]
        elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
            mult = num_controlnet
            control_guidance_start, control_guidance_end = (
                mult * [control_guidance_start],
                mult * [control_guidance_end],
            )

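        # `control_guidance_start`/`control_guidance_end` are broadcast above so that every
        # ControlNet gets its own (start, end) window; scalars are expanded to lists of
        # length `num_controlnet`.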
# 1. Check inputs. Raise error if not correct
|
| 660 |
+
self.check_inputs(
|
| 661 |
+
num_controlnet,
|
| 662 |
+
prompt,
|
| 663 |
+
control_image,
|
| 664 |
+
callback_steps,
|
| 665 |
+
negative_prompt,
|
| 666 |
+
prompt_embeds,
|
| 667 |
+
negative_prompt_embeds,
|
| 668 |
+
controlnet_conditioning_scale,
|
| 669 |
+
control_guidance_start,
|
| 670 |
+
control_guidance_end,
|
| 671 |
+
)
|
| 672 |
+
|
| 673 |
+
# 2. Define call parameters
|
| 674 |
+
if prompt is not None and isinstance(prompt, str):
|
| 675 |
+
batch_size = 1
|
| 676 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 677 |
+
batch_size = len(prompt)
|
| 678 |
+
else:
|
| 679 |
+
batch_size = prompt_embeds.shape[0]
|
| 680 |
+
|
| 681 |
+
device = self._execution_device
|
| 682 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 683 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 684 |
+
# corresponds to doing no classifier free guidance.
|
| 685 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 686 |
+
|
| 687 |
+
if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
|
| 688 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
|
| 689 |
+
|
| 690 |
+
# 3. Encode input prompt
|
| 691 |
+
prompt_embeds = self._encode_prompt(
|
| 692 |
+
prompt,
|
| 693 |
+
num_images_per_prompt,
|
| 694 |
+
do_classifier_free_guidance,
|
| 695 |
+
negative_prompt,
|
| 696 |
+
prompt_embeds=prompt_embeds,
|
| 697 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 698 |
+
)
|
| 699 |
+
# 4. Prepare image
|
| 700 |
+
image = self.image_processor.preprocess(image).to(dtype=torch.float32)
|
| 701 |
+
|
| 702 |
+
# 5. Prepare controlnet_conditioning_image
|
| 703 |
+
if num_controlnet == 1:
|
| 704 |
+
control_image = self.prepare_control_image(
|
| 705 |
+
image=control_image,
|
| 706 |
+
width=width,
|
| 707 |
+
height=height,
|
| 708 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 709 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 710 |
+
device=device,
|
| 711 |
+
dtype=torch_dtype,
|
| 712 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 713 |
+
guess_mode=guess_mode,
|
| 714 |
+
)
|
| 715 |
+
elif num_controlnet > 1:
|
| 716 |
+
control_images = []
|
| 717 |
+
|
| 718 |
+
for control_image_ in control_image:
|
| 719 |
+
control_image_ = self.prepare_control_image(
|
| 720 |
+
image=control_image_,
|
| 721 |
+
width=width,
|
| 722 |
+
height=height,
|
| 723 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 724 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 725 |
+
device=device,
|
| 726 |
+
dtype=torch_dtype,
|
| 727 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 728 |
+
guess_mode=guess_mode,
|
| 729 |
+
)
|
| 730 |
+
|
| 731 |
+
control_images.append(control_image_)
|
| 732 |
+
|
| 733 |
+
control_image = control_images
|
| 734 |
+
else:
|
| 735 |
+
assert False
|
| 736 |
+
|
| 737 |
+
# 5. Prepare timesteps
|
| 738 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 739 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
| 740 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 741 |
+
|
| 742 |
+
# 6. Prepare latent variables
|
| 743 |
+
latents = self.prepare_latents(
|
| 744 |
+
image,
|
| 745 |
+
latent_timestep,
|
| 746 |
+
batch_size,
|
| 747 |
+
num_images_per_prompt,
|
| 748 |
+
torch_dtype,
|
| 749 |
+
device,
|
| 750 |
+
generator,
|
| 751 |
+
)
|
| 752 |
+
|
| 753 |
+
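        # controlnet_keep[i] is 1.0 while step i falls inside a ControlNet's
        # [control_guidance_start, control_guidance_end] window and 0.0 outside it, which is
        # what lets each ControlNet be switched on or off per denoising step below.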
        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7.1 Create tensor stating which controlnets to keep
        controlnet_keep = []
        for i in range(len(timesteps)):
            keeps = [
                1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
                for s, e in zip(control_guidance_start, control_guidance_end)
            ]
            controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                if isinstance(controlnet_keep[i], list):
                    cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
                else:
                    controlnet_cond_scale = controlnet_conditioning_scale
                    if isinstance(controlnet_cond_scale, list):
                        controlnet_cond_scale = controlnet_cond_scale[0]
                    cond_scale = controlnet_cond_scale * controlnet_keep[i]

                # predict the noise residual
                _latent_model_input = latent_model_input.cpu().detach().numpy()
                _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
                _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)

                if num_controlnet == 1:
                    control_images = np.array([control_image], dtype=np_dtype)
                else:
                    control_images = []
                    for _control_img in control_image:
                        _control_img = _control_img.cpu().detach().numpy()
                        control_images.append(_control_img)
                    control_images = np.array(control_images, dtype=np_dtype)

                control_scales = np.array(cond_scale, dtype=np_dtype)
                control_scales = np.resize(control_scales, (num_controlnet, 1))

                noise_pred = self.unet(
                    sample=_latent_model_input,
                    timestep=_t,
                    encoder_hidden_states=_prompt_embeds,
                    controlnet_conds=control_images,
                    conditioning_scales=control_scales,
                )[0]
                noise_pred = torch.from_numpy(noise_pred).to(device)

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if not output_type == "latent":
            _latents = latents.cpu().detach().numpy() / 0.18215
            _latents = np.array(_latents, dtype=np_dtype)
            image = self.vae_decoder(latent_sample=_latents)[0]
            image = torch.from_numpy(image).to(device, dtype=torch.float32)
            has_nsfw_concept = None
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

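# Example driver: builds the ONNX pipeline from an exported model directory and runs a
# QR-code-conditioned img2img generation with two ControlNet conditions.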
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--sd_model",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument(
        "--onnx_model_dir",
        type=str,
        required=True,
        help="Path to the ONNX directory",
    )

    parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")

    args = parser.parse_args()

    qr_image = Image.open(args.qr_img_path)
    qr_image = qr_image.resize((512, 512))

    # init stable diffusion pipeline
    pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
    pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)

    provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
    onnx_pipeline = OnnxStableDiffusionControlNetImg2ImgPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(
            os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
        ),
        vae_decoder=OnnxRuntimeModel.from_pretrained(
            os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
        ),
        text_encoder=OnnxRuntimeModel.from_pretrained(
            os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
        ),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(os.path.join(args.onnx_model_dir, "unet"), provider=provider),
        scheduler=pipeline.scheduler,
    )
    onnx_pipeline = onnx_pipeline.to("cuda")

    prompt = "a cute cat fly to the moon"
    negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"

    for i in range(10):
        start_time = time.time()
        image = onnx_pipeline(
            num_controlnet=2,
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=qr_image,
            control_image=[qr_image, qr_image],
            width=512,
            height=512,
            strength=0.75,
            num_inference_steps=20,
            num_images_per_prompt=1,
            controlnet_conditioning_scale=[0.8, 0.8],
            control_guidance_start=[0.3, 0.3],
            control_guidance_end=[0.9, 0.9],
        ).images[0]
        print(time.time() - start_time)
        image.save("output_qr_code.png")
v0.27.0/run_tensorrt_controlnet.py
ADDED
|
@@ -0,0 +1,1022 @@
import argparse
import atexit
import inspect
import os
import time
import warnings
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import PIL.Image
import pycuda.driver as cuda
import tensorrt as trt
import torch
from PIL import Image
from pycuda.tools import make_default_context
from transformers import CLIPTokenizer

from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    deprecate,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor


# Initialize CUDA
cuda.init()
context = make_default_context()
device = context.get_device()
atexit.register(context.pop)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

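# The helper below deserializes a prebuilt TensorRT engine from disk; TensorRTModel then wraps
# it with pagelocked host buffers and device allocations for every input/output binding of the
# engine, so the pipeline can call it like a regular model.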
def load_engine(trt_runtime, engine_path):
    with open(engine_path, "rb") as f:
        engine_data = f.read()
    engine = trt_runtime.deserialize_cuda_engine(engine_data)
    return engine


class TensorRTModel:
    def __init__(
        self,
        trt_engine_path,
        **kwargs,
    ):
        cuda.init()
        stream = cuda.Stream()
        TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
        trt.init_libnvinfer_plugins(TRT_LOGGER, "")
        trt_runtime = trt.Runtime(TRT_LOGGER)
        engine = load_engine(trt_runtime, trt_engine_path)
        context = engine.create_execution_context()

        # allocates memory for network inputs/outputs on both CPU and GPU
        host_inputs = []
        cuda_inputs = []
        host_outputs = []
        cuda_outputs = []
        bindings = []
        input_names = []
        output_names = []

        for binding in engine:
            datatype = engine.get_binding_dtype(binding)
            if datatype == trt.DataType.HALF:
                dtype = np.float16
            else:
                dtype = np.float32

            shape = tuple(engine.get_binding_shape(binding))
            host_mem = cuda.pagelocked_empty(shape, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(cuda_mem))

            if engine.binding_is_input(binding):
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
                input_names.append(binding)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)
                output_names.append(binding)

        self.stream = stream
        self.context = context
        self.engine = engine

        self.host_inputs = host_inputs
        self.cuda_inputs = cuda_inputs
        self.host_outputs = host_outputs
        self.cuda_outputs = cuda_outputs
        self.bindings = bindings
        self.batch_size = engine.max_batch_size

        self.input_names = input_names
        self.output_names = output_names

    def __call__(self, **kwargs):
        context = self.context
        stream = self.stream
        bindings = self.bindings

        host_inputs = self.host_inputs
        cuda_inputs = self.cuda_inputs
        host_outputs = self.host_outputs
        cuda_outputs = self.cuda_outputs

        for idx, input_name in enumerate(self.input_names):
            _input = kwargs[input_name]
            np.copyto(host_inputs[idx], _input)
            # transfer input data to the GPU
            cuda.memcpy_htod_async(cuda_inputs[idx], host_inputs[idx], stream)

        context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)

        result = {}
        for idx, output_name in enumerate(self.output_names):
            # transfer predictions back from the GPU
            cuda.memcpy_dtoh_async(host_outputs[idx], cuda_outputs[idx], stream)
            result[output_name] = host_outputs[idx]

        stream.synchronize()

        return result

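# Note: EXAMPLE_DOC_STRING below reuses the generic StableDiffusionControlNetImg2ImgPipeline
# example; it illustrates the ControlNet img2img workflow rather than this TensorRT entry point.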
EXAMPLE_DOC_STRING = """
|
| 135 |
+
Examples:
|
| 136 |
+
```py
|
| 137 |
+
>>> # !pip install opencv-python transformers accelerate
|
| 138 |
+
>>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
|
| 139 |
+
>>> from diffusers.utils import load_image
|
| 140 |
+
>>> import numpy as np
|
| 141 |
+
>>> import torch
|
| 142 |
+
|
| 143 |
+
>>> import cv2
|
| 144 |
+
>>> from PIL import Image
|
| 145 |
+
|
| 146 |
+
>>> # download an image
|
| 147 |
+
>>> image = load_image(
|
| 148 |
+
... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
|
| 149 |
+
... )
|
| 150 |
+
>>> np_image = np.array(image)
|
| 151 |
+
|
| 152 |
+
>>> # get canny image
|
| 153 |
+
>>> np_image = cv2.Canny(np_image, 100, 200)
|
| 154 |
+
>>> np_image = np_image[:, :, None]
|
| 155 |
+
>>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
|
| 156 |
+
>>> canny_image = Image.fromarray(np_image)
|
| 157 |
+
|
| 158 |
+
>>> # load control net and stable diffusion v1-5
|
| 159 |
+
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
|
| 160 |
+
>>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
|
| 161 |
+
... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
|
| 162 |
+
... )
|
| 163 |
+
|
| 164 |
+
>>> # speed up diffusion process with faster scheduler and memory optimization
|
| 165 |
+
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
| 166 |
+
>>> pipe.enable_model_cpu_offload()
|
| 167 |
+
|
| 168 |
+
>>> # generate image
|
| 169 |
+
>>> generator = torch.manual_seed(0)
|
| 170 |
+
>>> image = pipe(
|
| 171 |
+
... "futuristic-looking woman",
|
| 172 |
+
... num_inference_steps=20,
|
| 173 |
+
... generator=generator,
|
| 174 |
+
... image=image,
|
| 175 |
+
... control_image=canny_image,
|
| 176 |
+
... ).images[0]
|
| 177 |
+
```
|
| 178 |
+
"""
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
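# prepare_image normalizes the init image to a float tensor in [-1, 1] with shape
# (batch, 3, H, W), accepting tensors, PIL images, numpy arrays, or lists of these.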
def prepare_image(image):
|
| 182 |
+
if isinstance(image, torch.Tensor):
|
| 183 |
+
# Batch single image
|
| 184 |
+
if image.ndim == 3:
|
| 185 |
+
image = image.unsqueeze(0)
|
| 186 |
+
|
| 187 |
+
image = image.to(dtype=torch.float32)
|
| 188 |
+
else:
|
| 189 |
+
# preprocess image
|
| 190 |
+
if isinstance(image, (PIL.Image.Image, np.ndarray)):
|
| 191 |
+
image = [image]
|
| 192 |
+
|
| 193 |
+
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
|
| 194 |
+
image = [np.array(i.convert("RGB"))[None, :] for i in image]
|
| 195 |
+
image = np.concatenate(image, axis=0)
|
| 196 |
+
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
|
| 197 |
+
image = np.concatenate([i[None, :] for i in image], axis=0)
|
| 198 |
+
|
| 199 |
+
image = image.transpose(0, 3, 1, 2)
|
| 200 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 201 |
+
|
| 202 |
+
return image
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
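# The TensorRT pipeline below mirrors the ONNX variant above, but routes the fused
# UNet + ControlNet forward pass through TensorRTModel; the VAE encoder/decoder and the
# text encoder remain ONNX Runtime models.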
class TensorRTStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
|
| 206 |
+
vae_encoder: OnnxRuntimeModel
|
| 207 |
+
vae_decoder: OnnxRuntimeModel
|
| 208 |
+
text_encoder: OnnxRuntimeModel
|
| 209 |
+
tokenizer: CLIPTokenizer
|
| 210 |
+
unet: TensorRTModel
|
| 211 |
+
scheduler: KarrasDiffusionSchedulers
|
| 212 |
+
|
| 213 |
+
def __init__(
|
| 214 |
+
self,
|
| 215 |
+
vae_encoder: OnnxRuntimeModel,
|
| 216 |
+
vae_decoder: OnnxRuntimeModel,
|
| 217 |
+
text_encoder: OnnxRuntimeModel,
|
| 218 |
+
tokenizer: CLIPTokenizer,
|
| 219 |
+
unet: TensorRTModel,
|
| 220 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 221 |
+
):
|
| 222 |
+
super().__init__()
|
| 223 |
+
|
| 224 |
+
self.register_modules(
|
| 225 |
+
vae_encoder=vae_encoder,
|
| 226 |
+
vae_decoder=vae_decoder,
|
| 227 |
+
text_encoder=text_encoder,
|
| 228 |
+
tokenizer=tokenizer,
|
| 229 |
+
unet=unet,
|
| 230 |
+
scheduler=scheduler,
|
| 231 |
+
)
|
| 232 |
+
self.vae_scale_factor = 2 ** (4 - 1)
|
| 233 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 234 |
+
self.control_image_processor = VaeImageProcessor(
|
| 235 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 236 |
+
)
|
| 237 |
+
|
| 238 |
+
def _encode_prompt(
|
| 239 |
+
self,
|
| 240 |
+
prompt: Union[str, List[str]],
|
| 241 |
+
num_images_per_prompt: Optional[int],
|
| 242 |
+
do_classifier_free_guidance: bool,
|
| 243 |
+
negative_prompt: Optional[str],
|
| 244 |
+
prompt_embeds: Optional[np.ndarray] = None,
|
| 245 |
+
negative_prompt_embeds: Optional[np.ndarray] = None,
|
| 246 |
+
):
|
| 247 |
+
r"""
|
| 248 |
+
Encodes the prompt into text encoder hidden states.
|
| 249 |
+
|
| 250 |
+
Args:
|
| 251 |
+
prompt (`str` or `List[str]`):
|
| 252 |
+
prompt to be encoded
|
| 253 |
+
num_images_per_prompt (`int`):
|
| 254 |
+
number of images that should be generated per prompt
|
| 255 |
+
do_classifier_free_guidance (`bool`):
|
| 256 |
+
whether to use classifier free guidance or not
|
| 257 |
+
negative_prompt (`str` or `List[str]`):
|
| 258 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 259 |
+
if `guidance_scale` is less than `1`).
|
| 260 |
+
prompt_embeds (`np.ndarray`, *optional*):
|
| 261 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 262 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 263 |
+
negative_prompt_embeds (`np.ndarray`, *optional*):
|
| 264 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 265 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 266 |
+
argument.
|
| 267 |
+
"""
|
| 268 |
+
if prompt is not None and isinstance(prompt, str):
|
| 269 |
+
batch_size = 1
|
| 270 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 271 |
+
batch_size = len(prompt)
|
| 272 |
+
else:
|
| 273 |
+
batch_size = prompt_embeds.shape[0]
|
| 274 |
+
|
| 275 |
+
if prompt_embeds is None:
|
| 276 |
+
# get prompt text embeddings
|
| 277 |
+
text_inputs = self.tokenizer(
|
| 278 |
+
prompt,
|
| 279 |
+
padding="max_length",
|
| 280 |
+
max_length=self.tokenizer.model_max_length,
|
| 281 |
+
truncation=True,
|
| 282 |
+
return_tensors="np",
|
| 283 |
+
)
|
| 284 |
+
text_input_ids = text_inputs.input_ids
|
| 285 |
+
untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
|
| 286 |
+
|
| 287 |
+
if not np.array_equal(text_input_ids, untruncated_ids):
|
| 288 |
+
removed_text = self.tokenizer.batch_decode(
|
| 289 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 290 |
+
)
|
| 291 |
+
logger.warning(
|
| 292 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 293 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
|
| 297 |
+
|
| 298 |
+
prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
|
| 299 |
+
|
| 300 |
+
# get unconditional embeddings for classifier free guidance
|
| 301 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 302 |
+
uncond_tokens: List[str]
|
| 303 |
+
if negative_prompt is None:
|
| 304 |
+
uncond_tokens = [""] * batch_size
|
| 305 |
+
elif type(prompt) is not type(negative_prompt):
|
| 306 |
+
raise TypeError(
|
| 307 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 308 |
+
f" {type(prompt)}."
|
| 309 |
+
)
|
| 310 |
+
elif isinstance(negative_prompt, str):
|
| 311 |
+
uncond_tokens = [negative_prompt] * batch_size
|
| 312 |
+
elif batch_size != len(negative_prompt):
|
| 313 |
+
raise ValueError(
|
| 314 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 315 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 316 |
+
" the batch size of `prompt`."
|
| 317 |
+
)
|
| 318 |
+
else:
|
| 319 |
+
uncond_tokens = negative_prompt
|
| 320 |
+
|
| 321 |
+
max_length = prompt_embeds.shape[1]
|
| 322 |
+
uncond_input = self.tokenizer(
|
| 323 |
+
uncond_tokens,
|
| 324 |
+
padding="max_length",
|
| 325 |
+
max_length=max_length,
|
| 326 |
+
truncation=True,
|
| 327 |
+
return_tensors="np",
|
| 328 |
+
)
|
| 329 |
+
negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
|
| 330 |
+
|
| 331 |
+
if do_classifier_free_guidance:
|
| 332 |
+
negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
|
| 333 |
+
|
| 334 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 335 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 336 |
+
# to avoid doing two forward passes
|
| 337 |
+
prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
|
| 338 |
+
|
| 339 |
+
return prompt_embeds
|
| 340 |
+
|
| 341 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
| 342 |
+
def decode_latents(self, latents):
|
| 343 |
+
warnings.warn(
|
| 344 |
+
"The decode_latents method is deprecated and will be removed in a future version. Please"
|
| 345 |
+
" use VaeImageProcessor instead",
|
| 346 |
+
FutureWarning,
|
| 347 |
+
)
|
| 348 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 349 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 350 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 351 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 352 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 353 |
+
return image
|
| 354 |
+
|
| 355 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 356 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 357 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 358 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 359 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 360 |
+
# and should be between [0, 1]
|
| 361 |
+
|
| 362 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 363 |
+
extra_step_kwargs = {}
|
| 364 |
+
if accepts_eta:
|
| 365 |
+
extra_step_kwargs["eta"] = eta
|
| 366 |
+
|
| 367 |
+
# check if the scheduler accepts generator
|
| 368 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 369 |
+
if accepts_generator:
|
| 370 |
+
extra_step_kwargs["generator"] = generator
|
| 371 |
+
return extra_step_kwargs
|
| 372 |
+
|
| 373 |
+
def check_inputs(
|
| 374 |
+
self,
|
| 375 |
+
num_controlnet,
|
| 376 |
+
prompt,
|
| 377 |
+
image,
|
| 378 |
+
callback_steps,
|
| 379 |
+
negative_prompt=None,
|
| 380 |
+
prompt_embeds=None,
|
| 381 |
+
negative_prompt_embeds=None,
|
| 382 |
+
controlnet_conditioning_scale=1.0,
|
| 383 |
+
control_guidance_start=0.0,
|
| 384 |
+
control_guidance_end=1.0,
|
| 385 |
+
):
|
| 386 |
+
if (callback_steps is None) or (
|
| 387 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 388 |
+
):
|
| 389 |
+
raise ValueError(
|
| 390 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 391 |
+
f" {type(callback_steps)}."
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
if prompt is not None and prompt_embeds is not None:
|
| 395 |
+
raise ValueError(
|
| 396 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 397 |
+
" only forward one of the two."
|
| 398 |
+
)
|
| 399 |
+
elif prompt is None and prompt_embeds is None:
|
| 400 |
+
raise ValueError(
|
| 401 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 402 |
+
)
|
| 403 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 404 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 405 |
+
|
| 406 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 407 |
+
raise ValueError(
|
| 408 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 409 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 410 |
+
)
|
| 411 |
+
|
| 412 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 413 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 414 |
+
raise ValueError(
|
| 415 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 416 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 417 |
+
f" {negative_prompt_embeds.shape}."
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
# Check `image`
|
| 421 |
+
if num_controlnet == 1:
|
| 422 |
+
self.check_image(image, prompt, prompt_embeds)
|
| 423 |
+
elif num_controlnet > 1:
|
| 424 |
+
if not isinstance(image, list):
|
| 425 |
+
raise TypeError("For multiple controlnets: `image` must be type `list`")
|
| 426 |
+
|
| 427 |
+
# When `image` is a nested list:
|
| 428 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 429 |
+
elif any(isinstance(i, list) for i in image):
|
| 430 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 431 |
+
elif len(image) != num_controlnet:
|
| 432 |
+
raise ValueError(
|
| 433 |
+
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
|
| 434 |
+
)
|
| 435 |
+
|
| 436 |
+
for image_ in image:
|
| 437 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 438 |
+
else:
|
| 439 |
+
assert False
|
| 440 |
+
|
| 441 |
+
# Check `controlnet_conditioning_scale`
|
| 442 |
+
if num_controlnet == 1:
|
| 443 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 444 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 445 |
+
elif num_controlnet > 1:
|
| 446 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 447 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 448 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 449 |
+
elif (
|
| 450 |
+
isinstance(controlnet_conditioning_scale, list)
|
| 451 |
+
and len(controlnet_conditioning_scale) != num_controlnet
|
| 452 |
+
):
|
| 453 |
+
raise ValueError(
|
| 454 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 455 |
+
" the same length as the number of controlnets"
|
| 456 |
+
)
|
| 457 |
+
else:
|
| 458 |
+
assert False
|
| 459 |
+
|
| 460 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 461 |
+
raise ValueError(
|
| 462 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 463 |
+
)
|
| 464 |
+
|
| 465 |
+
if num_controlnet > 1:
|
| 466 |
+
if len(control_guidance_start) != num_controlnet:
|
| 467 |
+
raise ValueError(
|
| 468 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
|
| 469 |
+
)
|
| 470 |
+
|
| 471 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 472 |
+
if start >= end:
|
| 473 |
+
raise ValueError(
|
| 474 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 475 |
+
)
|
| 476 |
+
if start < 0.0:
|
| 477 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 478 |
+
if end > 1.0:
|
| 479 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 480 |
+
|
| 481 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
|
| 482 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 483 |
+
image_is_pil = isinstance(image, PIL.Image.Image)
|
| 484 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 485 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 486 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
| 487 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 488 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 489 |
+
|
| 490 |
+
if (
|
| 491 |
+
not image_is_pil
|
| 492 |
+
and not image_is_tensor
|
| 493 |
+
and not image_is_np
|
| 494 |
+
and not image_is_pil_list
|
| 495 |
+
and not image_is_tensor_list
|
| 496 |
+
and not image_is_np_list
|
| 497 |
+
):
|
| 498 |
+
raise TypeError(
|
| 499 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
if image_is_pil:
|
| 503 |
+
image_batch_size = 1
|
| 504 |
+
else:
|
| 505 |
+
image_batch_size = len(image)
|
| 506 |
+
|
| 507 |
+
if prompt is not None and isinstance(prompt, str):
|
| 508 |
+
prompt_batch_size = 1
|
| 509 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 510 |
+
prompt_batch_size = len(prompt)
|
| 511 |
+
elif prompt_embeds is not None:
|
| 512 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 513 |
+
|
| 514 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 515 |
+
raise ValueError(
|
| 516 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 517 |
+
)
|
| 518 |
+
|
| 519 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
|
| 520 |
+
def prepare_control_image(
|
| 521 |
+
self,
|
| 522 |
+
image,
|
| 523 |
+
width,
|
| 524 |
+
height,
|
| 525 |
+
batch_size,
|
| 526 |
+
num_images_per_prompt,
|
| 527 |
+
device,
|
| 528 |
+
dtype,
|
| 529 |
+
do_classifier_free_guidance=False,
|
| 530 |
+
guess_mode=False,
|
| 531 |
+
):
|
| 532 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 533 |
+
image_batch_size = image.shape[0]
|
| 534 |
+
|
| 535 |
+
if image_batch_size == 1:
|
| 536 |
+
repeat_by = batch_size
|
| 537 |
+
else:
|
| 538 |
+
# image batch size is the same as prompt batch size
|
| 539 |
+
repeat_by = num_images_per_prompt
|
| 540 |
+
|
| 541 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 542 |
+
|
| 543 |
+
image = image.to(device=device, dtype=dtype)
|
| 544 |
+
|
| 545 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 546 |
+
image = torch.cat([image] * 2)
|
| 547 |
+
|
| 548 |
+
return image
|
| 549 |
+
|
| 550 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
|
| 551 |
+
def get_timesteps(self, num_inference_steps, strength, device):
|
| 552 |
+
# get the original timestep using init_timestep
|
| 553 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 554 |
+
|
| 555 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 556 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 557 |
+
|
| 558 |
+
return timesteps, num_inference_steps - t_start
|
| 559 |
+
|
| 560 |
+
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
|
| 561 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 562 |
+
raise ValueError(
|
| 563 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 564 |
+
)
|
| 565 |
+
|
| 566 |
+
image = image.to(device=device, dtype=dtype)
|
| 567 |
+
|
| 568 |
+
batch_size = batch_size * num_images_per_prompt
|
| 569 |
+
|
| 570 |
+
if image.shape[1] == 4:
|
| 571 |
+
init_latents = image
|
| 572 |
+
|
| 573 |
+
else:
|
| 574 |
+
_image = image.cpu().detach().numpy()
|
| 575 |
+
init_latents = self.vae_encoder(sample=_image)[0]
|
| 576 |
+
init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
|
| 577 |
+
init_latents = 0.18215 * init_latents
|
| 578 |
+
|
| 579 |
+
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
| 580 |
+
# expand init_latents for batch_size
|
| 581 |
+
deprecation_message = (
|
| 582 |
+
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
|
| 583 |
+
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
|
| 584 |
+
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
|
| 585 |
+
" your script to pass as many initial images as text prompts to suppress this warning."
|
| 586 |
+
)
|
| 587 |
+
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
|
| 588 |
+
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
| 589 |
+
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
|
| 590 |
+
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
| 591 |
+
raise ValueError(
|
| 592 |
+
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
| 593 |
+
)
|
| 594 |
+
else:
|
| 595 |
+
init_latents = torch.cat([init_latents], dim=0)
|
| 596 |
+
|
| 597 |
+
shape = init_latents.shape
|
| 598 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 599 |
+
|
| 600 |
+
# get latents
|
| 601 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 602 |
+
latents = init_latents
|
| 603 |
+
|
| 604 |
+
return latents
|
| 605 |
+
|
| 606 |
+
@torch.no_grad()
|
| 607 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 608 |
+
def __call__(
|
| 609 |
+
self,
|
| 610 |
+
num_controlnet: int,
|
| 611 |
+
fp16: bool = True,
|
| 612 |
+
prompt: Union[str, List[str]] = None,
|
| 613 |
+
image: Union[
|
| 614 |
+
torch.FloatTensor,
|
| 615 |
+
PIL.Image.Image,
|
| 616 |
+
np.ndarray,
|
| 617 |
+
List[torch.FloatTensor],
|
| 618 |
+
List[PIL.Image.Image],
|
| 619 |
+
List[np.ndarray],
|
| 620 |
+
] = None,
|
| 621 |
+
control_image: Union[
|
| 622 |
+
torch.FloatTensor,
|
| 623 |
+
PIL.Image.Image,
|
| 624 |
+
np.ndarray,
|
| 625 |
+
List[torch.FloatTensor],
|
| 626 |
+
List[PIL.Image.Image],
|
| 627 |
+
List[np.ndarray],
|
| 628 |
+
] = None,
|
| 629 |
+
height: Optional[int] = None,
|
| 630 |
+
width: Optional[int] = None,
|
| 631 |
+
strength: float = 0.8,
|
| 632 |
+
num_inference_steps: int = 50,
|
| 633 |
+
guidance_scale: float = 7.5,
|
| 634 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 635 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 636 |
+
eta: float = 0.0,
|
| 637 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 638 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 639 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 640 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 641 |
+
output_type: Optional[str] = "pil",
|
| 642 |
+
return_dict: bool = True,
|
| 643 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 644 |
+
callback_steps: int = 1,
|
| 645 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 646 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
|
| 647 |
+
guess_mode: bool = False,
|
| 648 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 649 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 650 |
+
):
|
| 651 |
+
r"""
|
| 652 |
+
Function invoked when calling the pipeline for generation.
|
| 653 |
+
|
| 654 |
+
Args:
|
| 655 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 656 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
| 657 |
+
instead.
|
| 658 |
+
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 659 |
+
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 660 |
+
The initial image will be used as the starting point for the image generation process. Can also accept
|
| 661 |
+
image latents as `image`, if passing latents directly, it will not be encoded again.
|
| 662 |
+
control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 663 |
+
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 664 |
+
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
| 665 |
+
the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
|
| 666 |
+
also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
|
| 667 |
+
height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
|
| 668 |
+
specified in init, images must be passed as a list such that each element of the list can be correctly
|
| 669 |
+
batched for input to a single controlnet.
|
| 670 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 671 |
+
The height in pixels of the generated image.
|
| 672 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 673 |
+
The width in pixels of the generated image.
|
| 674 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 675 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 676 |
+
expense of slower inference.
|
| 677 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 678 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 679 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 680 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 681 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 682 |
+
usually at the expense of lower image quality.
|
| 683 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 684 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 685 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 686 |
+
less than `1`).
|
| 687 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 688 |
+
The number of images to generate per prompt.
|
| 689 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 690 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 691 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 692 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 693 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 694 |
+
to make generation deterministic.
|
| 695 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 696 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 697 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 698 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 699 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 700 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 701 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 702 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 703 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 704 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 705 |
+
argument.
|
| 706 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 707 |
+
The output format of the generate image. Choose between
|
| 708 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 709 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 710 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 711 |
+
plain tuple.
|
| 712 |
+
callback (`Callable`, *optional*):
|
| 713 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 714 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 715 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 716 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 717 |
+
called at every step.
|
| 718 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 719 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 720 |
+
`self.processor` in
|
| 721 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 722 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 723 |
+
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 724 |
+
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
| 725 |
+
corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
|
| 726 |
+
than for [`~StableDiffusionControlNetPipeline.__call__`].
|
| 727 |
+
guess_mode (`bool`, *optional*, defaults to `False`):
|
| 728 |
+
In this mode, the ControlNet encoder will try best to recognize the content of the input image even if
|
| 729 |
+
you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
|
| 730 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 731 |
+
The percentage of total steps at which the controlnet starts applying.
|
| 732 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 733 |
+
The percentage of total steps at which the controlnet stops applying.
|
| 734 |
+
|
| 735 |
+
Examples:
|
| 736 |
+
|
| 737 |
+
Returns:
|
| 738 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 739 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 740 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 741 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 742 |
+
(nsfw) content, according to the `safety_checker`.
|
| 743 |
+
"""
|
| 744 |
+
if fp16:
|
| 745 |
+
torch_dtype = torch.float16
|
| 746 |
+
np_dtype = np.float16
|
| 747 |
+
else:
|
| 748 |
+
torch_dtype = torch.float32
|
| 749 |
+
np_dtype = np.float32
|
| 750 |
+
|
| 751 |
+
# align format for control guidance
|
| 752 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 753 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 754 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 755 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 756 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 757 |
+
mult = num_controlnet
|
| 758 |
+
control_guidance_start, control_guidance_end = (
|
| 759 |
+
mult * [control_guidance_start],
|
| 760 |
+
mult * [control_guidance_end],
|
| 761 |
+
)
|
| 762 |
+
|
| 763 |
+
# 1. Check inputs. Raise error if not correct
|
| 764 |
+
self.check_inputs(
|
| 765 |
+
num_controlnet,
|
| 766 |
+
prompt,
|
| 767 |
+
control_image,
|
| 768 |
+
callback_steps,
|
| 769 |
+
negative_prompt,
|
| 770 |
+
prompt_embeds,
|
| 771 |
+
negative_prompt_embeds,
|
| 772 |
+
controlnet_conditioning_scale,
|
| 773 |
+
control_guidance_start,
|
| 774 |
+
control_guidance_end,
|
| 775 |
+
)
|
| 776 |
+
|
| 777 |
+
# 2. Define call parameters
|
| 778 |
+
if prompt is not None and isinstance(prompt, str):
|
| 779 |
+
batch_size = 1
|
| 780 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 781 |
+
batch_size = len(prompt)
|
| 782 |
+
else:
|
| 783 |
+
batch_size = prompt_embeds.shape[0]
|
| 784 |
+
|
| 785 |
+
device = self._execution_device
|
| 786 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 787 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 788 |
+
# corresponds to doing no classifier free guidance.
|
| 789 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 790 |
+
|
| 791 |
+
if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
|
| 792 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
|
| 793 |
+
|
| 794 |
+
# 3. Encode input prompt
|
| 795 |
+
prompt_embeds = self._encode_prompt(
|
| 796 |
+
prompt,
|
| 797 |
+
num_images_per_prompt,
|
| 798 |
+
do_classifier_free_guidance,
|
| 799 |
+
negative_prompt,
|
| 800 |
+
prompt_embeds=prompt_embeds,
|
| 801 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 802 |
+
)
|
| 803 |
+
# 4. Prepare image
|
| 804 |
+
image = self.image_processor.preprocess(image).to(dtype=torch.float32)
|
| 805 |
+
|
| 806 |
+
# 5. Prepare controlnet_conditioning_image
|
| 807 |
+
if num_controlnet == 1:
|
| 808 |
+
control_image = self.prepare_control_image(
|
| 809 |
+
image=control_image,
|
| 810 |
+
width=width,
|
| 811 |
+
height=height,
|
| 812 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 813 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 814 |
+
device=device,
|
| 815 |
+
dtype=torch_dtype,
|
| 816 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 817 |
+
guess_mode=guess_mode,
|
| 818 |
+
)
|
| 819 |
+
elif num_controlnet > 1:
|
| 820 |
+
control_images = []
|
| 821 |
+
|
| 822 |
+
for control_image_ in control_image:
|
| 823 |
+
control_image_ = self.prepare_control_image(
|
| 824 |
+
image=control_image_,
|
| 825 |
+
width=width,
|
| 826 |
+
height=height,
|
| 827 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 828 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 829 |
+
device=device,
|
| 830 |
+
dtype=torch_dtype,
|
| 831 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 832 |
+
guess_mode=guess_mode,
|
| 833 |
+
)
|
| 834 |
+
|
| 835 |
+
control_images.append(control_image_)
|
| 836 |
+
|
| 837 |
+
control_image = control_images
|
| 838 |
+
else:
|
| 839 |
+
assert False
|
| 840 |
+
|
| 841 |
+
# 5. Prepare timesteps
|
| 842 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 843 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
| 844 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 845 |
+
|
| 846 |
+
# 6. Prepare latent variables
|
| 847 |
+
latents = self.prepare_latents(
|
| 848 |
+
image,
|
| 849 |
+
latent_timestep,
|
| 850 |
+
batch_size,
|
| 851 |
+
num_images_per_prompt,
|
| 852 |
+
torch_dtype,
|
| 853 |
+
device,
|
| 854 |
+
generator,
|
| 855 |
+
)
|
| 856 |
+
|
| 857 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 858 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 859 |
+
|
| 860 |
+
# 7.1 Create tensor stating which controlnets to keep
|
| 861 |
+
controlnet_keep = []
|
| 862 |
+
for i in range(len(timesteps)):
|
| 863 |
+
keeps = [
|
| 864 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 865 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 866 |
+
]
|
| 867 |
+
controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
|
| 868 |
+
|
| 869 |
+
# 8. Denoising loop
|
| 870 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 871 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 872 |
+
for i, t in enumerate(timesteps):
|
| 873 |
+
# expand the latents if we are doing classifier free guidance
|
| 874 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 875 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 876 |
+
|
| 877 |
+
if isinstance(controlnet_keep[i], list):
|
| 878 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 879 |
+
else:
|
| 880 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 881 |
+
if isinstance(controlnet_cond_scale, list):
|
| 882 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 883 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 884 |
+
|
| 885 |
+
# predict the noise residual
|
| 886 |
+
_latent_model_input = latent_model_input.cpu().detach().numpy()
|
| 887 |
+
_prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
|
| 888 |
+
_t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
|
| 889 |
+
|
| 890 |
+
if num_controlnet == 1:
|
| 891 |
+
control_images = np.array([control_image], dtype=np_dtype)
|
| 892 |
+
else:
|
| 893 |
+
control_images = []
|
| 894 |
+
for _control_img in control_image:
|
| 895 |
+
_control_img = _control_img.cpu().detach().numpy()
|
| 896 |
+
control_images.append(_control_img)
|
| 897 |
+
control_images = np.array(control_images, dtype=np_dtype)
|
| 898 |
+
|
| 899 |
+
control_scales = np.array(cond_scale, dtype=np_dtype)
|
| 900 |
+
control_scales = np.resize(control_scales, (num_controlnet, 1))
|
| 901 |
+
|
| 902 |
+
noise_pred = self.unet(
|
| 903 |
+
sample=_latent_model_input,
|
| 904 |
+
timestep=_t,
|
| 905 |
+
encoder_hidden_states=_prompt_embeds,
|
| 906 |
+
controlnet_conds=control_images,
|
| 907 |
+
conditioning_scales=control_scales,
|
| 908 |
+
)["noise_pred"]
|
| 909 |
+
noise_pred = torch.from_numpy(noise_pred).to(device)
|
| 910 |
+
|
| 911 |
+
# perform guidance
|
| 912 |
+
if do_classifier_free_guidance:
|
| 913 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 914 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 915 |
+
|
| 916 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 917 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 918 |
+
|
| 919 |
+
# call the callback, if provided
|
| 920 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 921 |
+
progress_bar.update()
|
| 922 |
+
if callback is not None and i % callback_steps == 0:
|
| 923 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 924 |
+
callback(step_idx, t, latents)
|
| 925 |
+
|
| 926 |
+
if not output_type == "latent":
|
| 927 |
+
_latents = latents.cpu().detach().numpy() / 0.18215
|
| 928 |
+
_latents = np.array(_latents, dtype=np_dtype)
|
| 929 |
+
image = self.vae_decoder(latent_sample=_latents)[0]
|
| 930 |
+
image = torch.from_numpy(image).to(device, dtype=torch.float32)
|
| 931 |
+
has_nsfw_concept = None
|
| 932 |
+
else:
|
| 933 |
+
image = latents
|
| 934 |
+
has_nsfw_concept = None
|
| 935 |
+
|
| 936 |
+
if has_nsfw_concept is None:
|
| 937 |
+
do_denormalize = [True] * image.shape[0]
|
| 938 |
+
else:
|
| 939 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 940 |
+
|
| 941 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 942 |
+
|
| 943 |
+
if not return_dict:
|
| 944 |
+
return (image, has_nsfw_concept)
|
| 945 |
+
|
| 946 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--sd_model",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument(
        "--onnx_model_dir",
        type=str,
        required=True,
        help="Path to the ONNX directory",
    )

    parser.add_argument(
        "--unet_engine_path",
        type=str,
        required=True,
        help="Path to the unet + controlnet tensorrt model",
    )

    parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")

    args = parser.parse_args()

    qr_image = Image.open(args.qr_img_path)
    qr_image = qr_image.resize((512, 512))

    # init stable diffusion pipeline
    pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
    pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)

    provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
    onnx_pipeline = TensorRTStableDiffusionControlNetImg2ImgPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(
            os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
        ),
        vae_decoder=OnnxRuntimeModel.from_pretrained(
            os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
        ),
        text_encoder=OnnxRuntimeModel.from_pretrained(
            os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
        ),
        tokenizer=pipeline.tokenizer,
        unet=TensorRTModel(args.unet_engine_path),
        scheduler=pipeline.scheduler,
    )
    onnx_pipeline = onnx_pipeline.to("cuda")

    prompt = "a cute cat fly to the moon"
    negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"

    for i in range(10):
        start_time = time.time()
        image = onnx_pipeline(
            num_controlnet=2,
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=qr_image,
            control_image=[qr_image, qr_image],
            width=512,
            height=512,
            strength=0.75,
            num_inference_steps=20,
            num_images_per_prompt=1,
            controlnet_conditioning_scale=[0.8, 0.8],
            control_guidance_start=[0.3, 0.3],
            control_guidance_end=[0.9, 0.9],
        ).images[0]
        print(time.time() - start_time)
        image.save("output_qr_code.png")
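

# Note (added for exposition, not part of the original script): a minimal
# standalone sketch of how `control_guidance_start` / `control_guidance_end`
# turn into the per-step `controlnet_keep` weights computed in section 7.1 of
# `__call__` above. The function name and the example values are illustrative only.
def controlnet_keep_schedule(num_steps, starts, ends):
    keeps = []
    for i in range(num_steps):
        # a ControlNet gets weight 1.0 only while the current step fraction lies within [start, end]
        keeps.append(
            [1.0 - float(i / num_steps < s or (i + 1) / num_steps > e) for s, e in zip(starts, ends)]
        )
    return keeps


# e.g. controlnet_keep_schedule(20, [0.3, 0.3], [0.9, 0.9]) keeps both ControlNets
# active roughly for steps 6-17 of the 20-step schedule used above.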

v0.27.0/scheduling_ufogen.py ADDED
@@ -0,0 +1,523 @@
# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
|
| 16 |
+
|
| 17 |
+
import math
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
from typing import List, Optional, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import torch
|
| 23 |
+
|
| 24 |
+
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
| 25 |
+
from diffusers.schedulers.scheduling_utils import SchedulerMixin
|
| 26 |
+
from diffusers.utils import BaseOutput
|
| 27 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@dataclass
|
| 31 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UFOGen
|
| 32 |
+
class UFOGenSchedulerOutput(BaseOutput):
|
| 33 |
+
"""
|
| 34 |
+
Output class for the scheduler's `step` function output.
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 38 |
+
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
|
| 39 |
+
denoising loop.
|
| 40 |
+
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 41 |
+
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
|
| 42 |
+
`pred_original_sample` can be used to preview progress or for guidance.
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
prev_sample: torch.FloatTensor
|
| 46 |
+
pred_original_sample: Optional[torch.FloatTensor] = None
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
|
| 50 |
+
def betas_for_alpha_bar(
|
| 51 |
+
num_diffusion_timesteps,
|
| 52 |
+
max_beta=0.999,
|
| 53 |
+
alpha_transform_type="cosine",
|
| 54 |
+
):
|
| 55 |
+
"""
|
| 56 |
+
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
| 57 |
+
(1-beta) over time from t = [0,1].
|
| 58 |
+
|
| 59 |
+
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
| 60 |
+
to that part of the diffusion process.
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
Args:
|
| 64 |
+
num_diffusion_timesteps (`int`): the number of betas to produce.
|
| 65 |
+
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
| 66 |
+
prevent singularities.
|
| 67 |
+
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
|
| 68 |
+
Choose from `cosine` or `exp`
|
| 69 |
+
|
| 70 |
+
Returns:
|
| 71 |
+
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
| 72 |
+
"""
|
| 73 |
+
if alpha_transform_type == "cosine":
|
| 74 |
+
|
| 75 |
+
def alpha_bar_fn(t):
|
| 76 |
+
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
|
| 77 |
+
|
| 78 |
+
elif alpha_transform_type == "exp":
|
| 79 |
+
|
| 80 |
+
def alpha_bar_fn(t):
|
| 81 |
+
return math.exp(t * -12.0)
|
| 82 |
+
|
| 83 |
+
else:
|
| 84 |
+
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
|
| 85 |
+
|
| 86 |
+
betas = []
|
| 87 |
+
for i in range(num_diffusion_timesteps):
|
| 88 |
+
t1 = i / num_diffusion_timesteps
|
| 89 |
+
t2 = (i + 1) / num_diffusion_timesteps
|
| 90 |
+
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
|
| 91 |
+
return torch.tensor(betas, dtype=torch.float32)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
|
| 95 |
+
def rescale_zero_terminal_snr(betas):
|
| 96 |
+
"""
|
| 97 |
+
Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
Args:
|
| 101 |
+
betas (`torch.FloatTensor`):
|
| 102 |
+
the betas that the scheduler is being initialized with.
|
| 103 |
+
|
| 104 |
+
Returns:
|
| 105 |
+
`torch.FloatTensor`: rescaled betas with zero terminal SNR
|
| 106 |
+
"""
|
| 107 |
+
# Convert betas to alphas_bar_sqrt
|
| 108 |
+
alphas = 1.0 - betas
|
| 109 |
+
alphas_cumprod = torch.cumprod(alphas, dim=0)
|
| 110 |
+
alphas_bar_sqrt = alphas_cumprod.sqrt()
|
| 111 |
+
|
| 112 |
+
# Store old values.
|
| 113 |
+
alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
|
| 114 |
+
alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
|
| 115 |
+
|
| 116 |
+
# Shift so the last timestep is zero.
|
| 117 |
+
alphas_bar_sqrt -= alphas_bar_sqrt_T
|
| 118 |
+
|
| 119 |
+
# Scale so the first timestep is back to the old value.
|
| 120 |
+
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
|
| 121 |
+
|
| 122 |
+
# Convert alphas_bar_sqrt to betas
|
| 123 |
+
alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
|
| 124 |
+
alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
|
| 125 |
+
alphas = torch.cat([alphas_bar[0:1], alphas])
|
| 126 |
+
betas = 1 - alphas
|
| 127 |
+
|
| 128 |
+
return betas
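

# Note (added for exposition, not part of the original file): a quick sanity
# check that the rescaling above drives the terminal cumulative alpha (and
# hence the terminal SNR) to zero. The helper name and default values are
# illustrative only.
def _terminal_alpha_bar_after_rescale(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02):
    import torch  # relies only on torch, which this module already imports

    betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
    rescaled = rescale_zero_terminal_snr(betas)
    alphas_bar = torch.cumprod(1.0 - rescaled, dim=0)
    # alphas_bar[-1] is (numerically) zero, i.e. the terminal SNR is zero
    return alphas_bar[-1]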
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class UFOGenScheduler(SchedulerMixin, ConfigMixin):
|
| 132 |
+
"""
|
| 133 |
+
`UFOGenScheduler` implements multistep and onestep sampling for a UFOGen model, introduced in
|
| 134 |
+
[UFOGen: You Forward Once Large Scale Text-to-Image Generation via Diffusion GANs](https://arxiv.org/abs/2311.09257)
|
| 135 |
+
by Yanwu Xu, Yang Zhao, Zhisheng Xiao, and Tingbo Hou. UFOGen is a variant of the denoising diffusion GAN (DDGAN)
|
| 136 |
+
model designed for one-step sampling.
|
| 137 |
+
|
| 138 |
+
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
|
| 139 |
+
methods the library implements for all schedulers such as loading and saving.
|
| 140 |
+
|
| 141 |
+
Args:
|
| 142 |
+
num_train_timesteps (`int`, defaults to 1000):
|
| 143 |
+
The number of diffusion steps to train the model.
|
| 144 |
+
beta_start (`float`, defaults to 0.0001):
|
| 145 |
+
The starting `beta` value of inference.
|
| 146 |
+
beta_end (`float`, defaults to 0.02):
|
| 147 |
+
The final `beta` value.
|
| 148 |
+
beta_schedule (`str`, defaults to `"linear"`):
|
| 149 |
+
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
| 150 |
+
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
| 151 |
+
clip_sample (`bool`, defaults to `True`):
|
| 152 |
+
Clip the predicted sample for numerical stability.
|
| 153 |
+
clip_sample_range (`float`, defaults to 1.0):
|
| 154 |
+
The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
|
| 155 |
+
set_alpha_to_one (`bool`, defaults to `True`):
|
| 156 |
+
Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
|
| 157 |
+
there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
|
| 158 |
+
otherwise it uses the alpha value at step 0.
|
| 159 |
+
prediction_type (`str`, defaults to `epsilon`, *optional*):
|
| 160 |
+
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
|
| 161 |
+
`sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
|
| 162 |
+
Video](https://imagen.research.google/video/paper.pdf) paper).
|
| 163 |
+
thresholding (`bool`, defaults to `False`):
|
| 164 |
+
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
|
| 165 |
+
as Stable Diffusion.
|
| 166 |
+
dynamic_thresholding_ratio (`float`, defaults to 0.995):
|
| 167 |
+
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
|
| 168 |
+
sample_max_value (`float`, defaults to 1.0):
|
| 169 |
+
The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
|
| 170 |
+
timestep_spacing (`str`, defaults to `"leading"`):
|
| 171 |
+
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
|
| 172 |
+
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
|
| 173 |
+
steps_offset (`int`, defaults to 0):
|
| 174 |
+
An offset added to the inference steps, as required by some model families.
|
| 175 |
+
rescale_betas_zero_snr (`bool`, defaults to `False`):
|
| 176 |
+
Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
|
| 177 |
+
dark samples instead of limiting it to samples with medium brightness. Loosely related to
|
| 178 |
+
[`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
|
| 179 |
+
denoising_step_size (`int`, defaults to 250):
|
| 180 |
+
The denoising step size parameter from the UFOGen paper. The number of steps used for training is roughly
|
| 181 |
+
`math.ceil(num_train_timesteps / denoising_step_size)`.
|
| 182 |
+
"""
|
| 183 |
+
|
| 184 |
+
order = 1
|
| 185 |
+
|
| 186 |
+
@register_to_config
|
| 187 |
+
def __init__(
|
| 188 |
+
self,
|
| 189 |
+
num_train_timesteps: int = 1000,
|
| 190 |
+
beta_start: float = 0.0001,
|
| 191 |
+
beta_end: float = 0.02,
|
| 192 |
+
beta_schedule: str = "linear",
|
| 193 |
+
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
| 194 |
+
clip_sample: bool = True,
|
| 195 |
+
set_alpha_to_one: bool = True,
|
| 196 |
+
prediction_type: str = "epsilon",
|
| 197 |
+
thresholding: bool = False,
|
| 198 |
+
dynamic_thresholding_ratio: float = 0.995,
|
| 199 |
+
clip_sample_range: float = 1.0,
|
| 200 |
+
sample_max_value: float = 1.0,
|
| 201 |
+
timestep_spacing: str = "leading",
|
| 202 |
+
steps_offset: int = 0,
|
| 203 |
+
rescale_betas_zero_snr: bool = False,
|
| 204 |
+
denoising_step_size: int = 250,
|
| 205 |
+
):
|
| 206 |
+
if trained_betas is not None:
|
| 207 |
+
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
|
| 208 |
+
elif beta_schedule == "linear":
|
| 209 |
+
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
|
| 210 |
+
elif beta_schedule == "scaled_linear":
|
| 211 |
+
# this schedule is very specific to the latent diffusion model.
|
| 212 |
+
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
|
| 213 |
+
elif beta_schedule == "squaredcos_cap_v2":
|
| 214 |
+
# Glide cosine schedule
|
| 215 |
+
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
| 216 |
+
elif beta_schedule == "sigmoid":
|
| 217 |
+
# GeoDiff sigmoid schedule
|
| 218 |
+
betas = torch.linspace(-6, 6, num_train_timesteps)
|
| 219 |
+
self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
|
| 220 |
+
else:
|
| 221 |
+
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
|
| 222 |
+
|
| 223 |
+
# Rescale for zero SNR
|
| 224 |
+
if rescale_betas_zero_snr:
|
| 225 |
+
self.betas = rescale_zero_terminal_snr(self.betas)
|
| 226 |
+
|
| 227 |
+
self.alphas = 1.0 - self.betas
|
| 228 |
+
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
|
| 229 |
+
|
| 230 |
+
# For the final step, there is no previous alphas_cumprod because we are already at 0
|
| 231 |
+
# `set_alpha_to_one` decides whether we set this parameter simply to one or
|
| 232 |
+
# whether we use the final alpha of the "non-previous" one.
|
| 233 |
+
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
|
| 234 |
+
|
| 235 |
+
# standard deviation of the initial noise distribution
|
| 236 |
+
self.init_noise_sigma = 1.0
|
| 237 |
+
|
| 238 |
+
# setable values
|
| 239 |
+
self.custom_timesteps = False
|
| 240 |
+
self.num_inference_steps = None
|
| 241 |
+
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
|
| 242 |
+
|
| 243 |
+
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
|
| 244 |
+
"""
|
| 245 |
+
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
| 246 |
+
current timestep.
|
| 247 |
+
|
| 248 |
+
Args:
|
| 249 |
+
sample (`torch.FloatTensor`):
|
| 250 |
+
The input sample.
|
| 251 |
+
timestep (`int`, *optional*):
|
| 252 |
+
The current timestep in the diffusion chain.
|
| 253 |
+
|
| 254 |
+
Returns:
|
| 255 |
+
`torch.FloatTensor`:
|
| 256 |
+
A scaled input sample.
|
| 257 |
+
"""
|
| 258 |
+
return sample
|
| 259 |
+
|
| 260 |
+
def set_timesteps(
|
| 261 |
+
self,
|
| 262 |
+
num_inference_steps: Optional[int] = None,
|
| 263 |
+
device: Union[str, torch.device] = None,
|
| 264 |
+
timesteps: Optional[List[int]] = None,
|
| 265 |
+
):
|
| 266 |
+
"""
|
| 267 |
+
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
|
| 268 |
+
|
| 269 |
+
Args:
|
| 270 |
+
num_inference_steps (`int`):
|
| 271 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 272 |
+
`timesteps` must be `None`.
|
| 273 |
+
device (`str` or `torch.device`, *optional*):
|
| 274 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 275 |
+
timesteps (`List[int]`, *optional*):
|
| 276 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 277 |
+
timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed,
|
| 278 |
+
`num_inference_steps` must be `None`.
|
| 279 |
+
|
| 280 |
+
"""
|
| 281 |
+
if num_inference_steps is not None and timesteps is not None:
|
| 282 |
+
raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.")
|
| 283 |
+
|
| 284 |
+
if timesteps is not None:
|
| 285 |
+
for i in range(1, len(timesteps)):
|
| 286 |
+
if timesteps[i] >= timesteps[i - 1]:
|
| 287 |
+
raise ValueError("`custom_timesteps` must be in descending order.")
|
| 288 |
+
|
| 289 |
+
if timesteps[0] >= self.config.num_train_timesteps:
|
| 290 |
+
raise ValueError(
|
| 291 |
+
f"`timesteps` must start before `self.config.train_timesteps`:"
|
| 292 |
+
f" {self.config.num_train_timesteps}."
|
| 293 |
+
)
|
| 294 |
+
|
| 295 |
+
timesteps = np.array(timesteps, dtype=np.int64)
|
| 296 |
+
self.custom_timesteps = True
|
| 297 |
+
else:
|
| 298 |
+
if num_inference_steps > self.config.num_train_timesteps:
|
| 299 |
+
raise ValueError(
|
| 300 |
+
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
|
| 301 |
+
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
|
| 302 |
+
f" maximal {self.config.num_train_timesteps} timesteps."
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
self.num_inference_steps = num_inference_steps
|
| 306 |
+
self.custom_timesteps = False
|
| 307 |
+
|
| 308 |
+
# TODO: For now, handle special case when num_inference_steps == 1 separately
|
| 309 |
+
if num_inference_steps == 1:
|
| 310 |
+
# Set the timestep schedule to num_train_timesteps - 1 rather than 0
|
| 311 |
+
# (that is, the one-step timestep schedule is always trailing rather than leading or linspace)
|
| 312 |
+
timesteps = np.array([self.config.num_train_timesteps - 1], dtype=np.int64)
|
| 313 |
+
else:
|
| 314 |
+
# TODO: For now, retain the DDPM timestep spacing logic
|
| 315 |
+
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
|
| 316 |
+
if self.config.timestep_spacing == "linspace":
|
| 317 |
+
timesteps = (
|
| 318 |
+
np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps)
|
| 319 |
+
.round()[::-1]
|
| 320 |
+
.copy()
|
| 321 |
+
.astype(np.int64)
|
| 322 |
+
)
|
| 323 |
+
elif self.config.timestep_spacing == "leading":
|
| 324 |
+
step_ratio = self.config.num_train_timesteps // self.num_inference_steps
|
| 325 |
+
# creates integer timesteps by multiplying by ratio
|
| 326 |
+
# casting to int to avoid issues when num_inference_step is power of 3
|
| 327 |
+
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
|
| 328 |
+
timesteps += self.config.steps_offset
|
| 329 |
+
elif self.config.timestep_spacing == "trailing":
|
| 330 |
+
step_ratio = self.config.num_train_timesteps / self.num_inference_steps
|
| 331 |
+
# creates integer timesteps by multiplying by ratio
|
| 332 |
+
# casting to int to avoid issues when num_inference_step is power of 3
|
| 333 |
+
timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64)
|
| 334 |
+
timesteps -= 1
|
| 335 |
+
else:
|
| 336 |
+
raise ValueError(
|
| 337 |
+
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
self.timesteps = torch.from_numpy(timesteps).to(device)
|
| 341 |
+
|
| 342 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
|
| 343 |
+
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
|
| 344 |
+
"""
|
| 345 |
+
"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
|
| 346 |
+
prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
|
| 347 |
+
s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
|
| 348 |
+
pixels from saturation at each step. We find that dynamic thresholding results in significantly better
|
| 349 |
+
photorealism as well as better image-text alignment, especially when using very large guidance weights."
|
| 350 |
+
|
| 351 |
+
https://arxiv.org/abs/2205.11487
|
| 352 |
+
"""
|
| 353 |
+
dtype = sample.dtype
|
| 354 |
+
batch_size, channels, *remaining_dims = sample.shape
|
| 355 |
+
|
| 356 |
+
if dtype not in (torch.float32, torch.float64):
|
| 357 |
+
sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
|
| 358 |
+
|
| 359 |
+
# Flatten sample for doing quantile calculation along each image
|
| 360 |
+
sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
|
| 361 |
+
|
| 362 |
+
abs_sample = sample.abs() # "a certain percentile absolute pixel value"
|
| 363 |
+
|
| 364 |
+
s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
|
| 365 |
+
s = torch.clamp(
|
| 366 |
+
s, min=1, max=self.config.sample_max_value
|
| 367 |
+
) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
|
| 368 |
+
s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
|
| 369 |
+
sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
|
| 370 |
+
|
| 371 |
+
sample = sample.reshape(batch_size, channels, *remaining_dims)
|
| 372 |
+
sample = sample.to(dtype)
|
| 373 |
+
|
| 374 |
+
return sample
|
| 375 |
+
|
| 376 |
+
def step(
|
| 377 |
+
self,
|
| 378 |
+
model_output: torch.FloatTensor,
|
| 379 |
+
timestep: int,
|
| 380 |
+
sample: torch.FloatTensor,
|
| 381 |
+
generator: Optional[torch.Generator] = None,
|
| 382 |
+
return_dict: bool = True,
|
| 383 |
+
) -> Union[UFOGenSchedulerOutput, Tuple]:
|
| 384 |
+
"""
|
| 385 |
+
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
|
| 386 |
+
process from the learned model outputs (most often the predicted noise).
|
| 387 |
+
|
| 388 |
+
Args:
|
| 389 |
+
model_output (`torch.FloatTensor`):
|
| 390 |
+
The direct output from learned diffusion model.
|
| 391 |
+
timestep (`float`):
|
| 392 |
+
The current discrete timestep in the diffusion chain.
|
| 393 |
+
sample (`torch.FloatTensor`):
|
| 394 |
+
A current instance of a sample created by the diffusion process.
|
| 395 |
+
generator (`torch.Generator`, *optional*):
|
| 396 |
+
A random number generator.
|
| 397 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 398 |
+
Whether or not to return a [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] or `tuple`.
|
| 399 |
+
|
| 400 |
+
Returns:
|
| 401 |
+
[`~schedulers.scheduling_ddpm.UFOGenSchedulerOutput`] or `tuple`:
|
| 402 |
+
If return_dict is `True`, [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] is returned, otherwise a
|
| 403 |
+
tuple is returned where the first element is the sample tensor.
|
| 404 |
+
|
| 405 |
+
"""
|
| 406 |
+
# 0. Resolve timesteps
|
| 407 |
+
t = timestep
|
| 408 |
+
prev_t = self.previous_timestep(t)
|
| 409 |
+
|
| 410 |
+
# 1. compute alphas, betas
|
| 411 |
+
alpha_prod_t = self.alphas_cumprod[t]
|
| 412 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.final_alpha_cumprod
|
| 413 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 414 |
+
# beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 415 |
+
# current_alpha_t = alpha_prod_t / alpha_prod_t_prev
|
| 416 |
+
# current_beta_t = 1 - current_alpha_t
|
| 417 |
+
|
| 418 |
+
# 2. compute predicted original sample from predicted noise also called
|
| 419 |
+
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
|
| 420 |
+
if self.config.prediction_type == "epsilon":
|
| 421 |
+
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
|
| 422 |
+
elif self.config.prediction_type == "sample":
|
| 423 |
+
pred_original_sample = model_output
|
| 424 |
+
elif self.config.prediction_type == "v_prediction":
|
| 425 |
+
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
|
| 426 |
+
else:
|
| 427 |
+
raise ValueError(
|
| 428 |
+
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
|
| 429 |
+
" `v_prediction` for UFOGenScheduler."
|
| 430 |
+
)
|
| 431 |
+
|
| 432 |
+
# 3. Clip or threshold "predicted x_0"
|
| 433 |
+
if self.config.thresholding:
|
| 434 |
+
pred_original_sample = self._threshold_sample(pred_original_sample)
|
| 435 |
+
elif self.config.clip_sample:
|
| 436 |
+
pred_original_sample = pred_original_sample.clamp(
|
| 437 |
+
-self.config.clip_sample_range, self.config.clip_sample_range
|
| 438 |
+
)
|
| 439 |
+
|
| 440 |
+
# 4. Single-step or multi-step sampling
|
| 441 |
+
# Noise is not used on the final timestep of the timestep schedule.
|
| 442 |
+
# This also means that noise is not used for one-step sampling.
|
| 443 |
+
if t != self.timesteps[-1]:
|
| 444 |
+
# TODO: is this correct?
|
| 445 |
+
# Sample prev sample x_{t - 1} ~ q(x_{t - 1} | x_0 = G(x_t, t))
|
| 446 |
+
device = model_output.device
|
| 447 |
+
noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype)
|
| 448 |
+
sqrt_alpha_prod_t_prev = alpha_prod_t_prev**0.5
|
| 449 |
+
sqrt_one_minus_alpha_prod_t_prev = (1 - alpha_prod_t_prev) ** 0.5
|
| 450 |
+
pred_prev_sample = sqrt_alpha_prod_t_prev * pred_original_sample + sqrt_one_minus_alpha_prod_t_prev * noise
|
| 451 |
+
else:
|
| 452 |
+
# Simply return the pred_original_sample. If `prediction_type == "sample"`, this is equivalent to returning
|
| 453 |
+
# the output of the GAN generator U-Net on the initial noisy latents x_T ~ N(0, I).
|
| 454 |
+
pred_prev_sample = pred_original_sample
|
| 455 |
+
|
| 456 |
+
if not return_dict:
|
| 457 |
+
return (pred_prev_sample,)
|
| 458 |
+
|
| 459 |
+
return UFOGenSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
|
| 460 |
+
|
| 461 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
|
| 462 |
+
def add_noise(
|
| 463 |
+
self,
|
| 464 |
+
original_samples: torch.FloatTensor,
|
| 465 |
+
noise: torch.FloatTensor,
|
| 466 |
+
timesteps: torch.IntTensor,
|
| 467 |
+
) -> torch.FloatTensor:
|
| 468 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
|
| 469 |
+
alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
|
| 470 |
+
timesteps = timesteps.to(original_samples.device)
|
| 471 |
+
|
| 472 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 473 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 474 |
+
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
| 475 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 476 |
+
|
| 477 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 478 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 479 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
| 480 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 481 |
+
|
| 482 |
+
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
| 483 |
+
return noisy_samples
|
| 484 |
+
|
| 485 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
|
| 486 |
+
def get_velocity(
|
| 487 |
+
self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
|
| 488 |
+
) -> torch.FloatTensor:
|
| 489 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as sample
|
| 490 |
+
alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
|
| 491 |
+
timesteps = timesteps.to(sample.device)
|
| 492 |
+
|
| 493 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 494 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 495 |
+
while len(sqrt_alpha_prod.shape) < len(sample.shape):
|
| 496 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 497 |
+
|
| 498 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 499 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 500 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
|
| 501 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 502 |
+
|
| 503 |
+
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
|
| 504 |
+
return velocity
|
| 505 |
+
|
| 506 |
+
def __len__(self):
|
| 507 |
+
return self.config.num_train_timesteps
|
| 508 |
+
|
| 509 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep
|
| 510 |
+
def previous_timestep(self, timestep):
|
| 511 |
+
if self.custom_timesteps:
|
| 512 |
+
index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0]
|
| 513 |
+
if index == self.timesteps.shape[0] - 1:
|
| 514 |
+
prev_t = torch.tensor(-1)
|
| 515 |
+
else:
|
| 516 |
+
prev_t = self.timesteps[index + 1]
|
| 517 |
+
else:
|
| 518 |
+
num_inference_steps = (
|
| 519 |
+
self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
|
| 520 |
+
)
|
| 521 |
+
prev_t = timestep - self.config.num_train_timesteps // num_inference_steps
|
| 522 |
+
|
| 523 |
+
return prev_t
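

# Note (added for exposition, not part of the original file): a minimal sketch of
# one-step sampling with `UFOGenScheduler`. `generator_unet` is any callable
# `(sample, t) -> model_output`; it is a placeholder for the UFOGen generator
# network, not a diffusers API.
def _ufogen_one_step_sample(generator_unet, shape=(1, 4, 64, 64)):
    scheduler = UFOGenScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=1)  # one-step schedule: [num_train_timesteps - 1]
    sample = randn_tensor(shape)  # start from x_T ~ N(0, I)
    for t in scheduler.timesteps:
        model_output = generator_unet(sample, t)
        # with a single timestep, `step` returns the predicted x_0 directly (no extra noise is added)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample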

v0.27.0/sd_text2img_k_diffusion.py ADDED
@@ -0,0 +1,414 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import importlib
|
| 16 |
+
import warnings
|
| 17 |
+
from typing import Callable, List, Optional, Union
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
|
| 21 |
+
|
| 22 |
+
from diffusers import DiffusionPipeline, LMSDiscreteScheduler, StableDiffusionMixin
|
| 23 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 24 |
+
from diffusers.utils import logging
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class ModelWrapper:
|
| 31 |
+
def __init__(self, model, alphas_cumprod):
|
| 32 |
+
self.model = model
|
| 33 |
+
self.alphas_cumprod = alphas_cumprod
|
| 34 |
+
|
| 35 |
+
def apply_model(self, *args, **kwargs):
|
| 36 |
+
if len(args) == 3:
|
| 37 |
+
encoder_hidden_states = args[-1]
|
| 38 |
+
args = args[:2]
|
| 39 |
+
if kwargs.get("cond", None) is not None:
|
| 40 |
+
encoder_hidden_states = kwargs.pop("cond")
|
| 41 |
+
return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample
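
    # Note (added for exposition, not part of the original file): `ModelWrapper`
    # adapts the diffusers UNet call convention to the `apply_model(x, t, cond)`
    # interface expected by k-diffusion's CompVisDenoiser/CompVisVDenoiser.
    # With x the latents, t the timesteps and cond the text embeddings, both of
    # the following end up as `unet(x, t, encoder_hidden_states=cond).sample`:
    #
    #     wrapper = ModelWrapper(unet, scheduler.alphas_cumprod)
    #     wrapper.apply_model(x, t, cond)
    #     wrapper.apply_model(x, t, cond=cond)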
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
|
| 45 |
+
r"""
|
| 46 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 47 |
+
|
| 48 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 49 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 50 |
+
|
| 51 |
+
Args:
|
| 52 |
+
vae ([`AutoencoderKL`]):
|
| 53 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 54 |
+
text_encoder ([`CLIPTextModel`]):
|
| 55 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 56 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 57 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 58 |
+
tokenizer (`CLIPTokenizer`):
|
| 59 |
+
Tokenizer of class
|
| 60 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 61 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 62 |
+
scheduler ([`SchedulerMixin`]):
|
| 63 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 64 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 65 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 66 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 67 |
+
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
| 68 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 69 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 73 |
+
|
| 74 |
+
def __init__(
|
| 75 |
+
self,
|
| 76 |
+
vae,
|
| 77 |
+
text_encoder,
|
| 78 |
+
tokenizer,
|
| 79 |
+
unet,
|
| 80 |
+
scheduler,
|
| 81 |
+
safety_checker,
|
| 82 |
+
feature_extractor,
|
| 83 |
+
):
|
| 84 |
+
super().__init__()
|
| 85 |
+
|
| 86 |
+
if safety_checker is None:
|
| 87 |
+
logger.warning(
|
| 88 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 89 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 90 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 91 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 92 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 93 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
# get correct sigmas from LMS
|
| 97 |
+
scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
|
| 98 |
+
self.register_modules(
|
| 99 |
+
vae=vae,
|
| 100 |
+
text_encoder=text_encoder,
|
| 101 |
+
tokenizer=tokenizer,
|
| 102 |
+
unet=unet,
|
| 103 |
+
scheduler=scheduler,
|
| 104 |
+
safety_checker=safety_checker,
|
| 105 |
+
feature_extractor=feature_extractor,
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
model = ModelWrapper(unet, scheduler.alphas_cumprod)
|
| 109 |
+
if scheduler.config.prediction_type == "v_prediction":
|
| 110 |
+
self.k_diffusion_model = CompVisVDenoiser(model)
|
| 111 |
+
else:
|
| 112 |
+
self.k_diffusion_model = CompVisDenoiser(model)
|
| 113 |
+
|
| 114 |
+
def set_sampler(self, scheduler_type: str):
|
| 115 |
+
warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.")
|
| 116 |
+
return self.set_scheduler(scheduler_type)
|
| 117 |
+
|
| 118 |
+
def set_scheduler(self, scheduler_type: str):
|
| 119 |
+
library = importlib.import_module("k_diffusion")
|
| 120 |
+
sampling = getattr(library, "sampling")
|
| 121 |
+
self.sampler = getattr(sampling, scheduler_type)
|
| 122 |
+
|
| 123 |
+
def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
|
| 124 |
+
r"""
|
| 125 |
+
Encodes the prompt into text encoder hidden states.
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
prompt (`str` or `list(int)`):
|
| 129 |
+
prompt to be encoded
|
| 130 |
+
device: (`torch.device`):
|
| 131 |
+
torch device
|
| 132 |
+
num_images_per_prompt (`int`):
|
| 133 |
+
number of images that should be generated per prompt
|
| 134 |
+
do_classifier_free_guidance (`bool`):
|
| 135 |
+
whether to use classifier free guidance or not
|
| 136 |
+
negative_prompt (`str` or `List[str]`):
|
| 137 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 138 |
+
if `guidance_scale` is less than `1`).
|
| 139 |
+
"""
|
| 140 |
+
batch_size = len(prompt) if isinstance(prompt, list) else 1
|
| 141 |
+
|
| 142 |
+
text_inputs = self.tokenizer(
|
| 143 |
+
prompt,
|
| 144 |
+
padding="max_length",
|
| 145 |
+
max_length=self.tokenizer.model_max_length,
|
| 146 |
+
truncation=True,
|
| 147 |
+
return_tensors="pt",
|
| 148 |
+
)
|
| 149 |
+
text_input_ids = text_inputs.input_ids
|
| 150 |
+
untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
|
| 151 |
+
|
| 152 |
+
if not torch.equal(text_input_ids, untruncated_ids):
|
| 153 |
+
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
|
| 154 |
+
logger.warning(
|
| 155 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 156 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 160 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 161 |
+
else:
|
| 162 |
+
attention_mask = None
|
| 163 |
+
|
| 164 |
+
text_embeddings = self.text_encoder(
|
| 165 |
+
text_input_ids.to(device),
|
| 166 |
+
attention_mask=attention_mask,
|
| 167 |
+
)
|
| 168 |
+
text_embeddings = text_embeddings[0]
|
| 169 |
+
|
| 170 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 171 |
+
bs_embed, seq_len, _ = text_embeddings.shape
|
| 172 |
+
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 173 |
+
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 174 |
+
|
| 175 |
+
# get unconditional embeddings for classifier free guidance
|
| 176 |
+
if do_classifier_free_guidance:
|
| 177 |
+
uncond_tokens: List[str]
|
| 178 |
+
if negative_prompt is None:
|
| 179 |
+
uncond_tokens = [""] * batch_size
|
| 180 |
+
elif type(prompt) is not type(negative_prompt):
|
| 181 |
+
raise TypeError(
|
| 182 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 183 |
+
f" {type(prompt)}."
|
| 184 |
+
)
|
| 185 |
+
elif isinstance(negative_prompt, str):
|
| 186 |
+
uncond_tokens = [negative_prompt]
|
| 187 |
+
elif batch_size != len(negative_prompt):
|
| 188 |
+
raise ValueError(
|
| 189 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 190 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 191 |
+
" the batch size of `prompt`."
|
| 192 |
+
)
|
| 193 |
+
else:
|
| 194 |
+
uncond_tokens = negative_prompt
|
| 195 |
+
|
| 196 |
+
max_length = text_input_ids.shape[-1]
|
| 197 |
+
uncond_input = self.tokenizer(
|
| 198 |
+
uncond_tokens,
|
| 199 |
+
padding="max_length",
|
| 200 |
+
max_length=max_length,
|
| 201 |
+
truncation=True,
|
| 202 |
+
return_tensors="pt",
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 206 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 207 |
+
else:
|
| 208 |
+
attention_mask = None
|
| 209 |
+
|
| 210 |
+
uncond_embeddings = self.text_encoder(
|
| 211 |
+
uncond_input.input_ids.to(device),
|
| 212 |
+
attention_mask=attention_mask,
|
| 213 |
+
)
|
| 214 |
+
uncond_embeddings = uncond_embeddings[0]
|
| 215 |
+
|
| 216 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 217 |
+
seq_len = uncond_embeddings.shape[1]
|
| 218 |
+
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 219 |
+
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 220 |
+
|
| 221 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 222 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 223 |
+
# to avoid doing two forward passes
|
| 224 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 225 |
+
|
| 226 |
+
return text_embeddings
|
| 227 |
+
|
| 228 |
+
def run_safety_checker(self, image, device, dtype):
|
| 229 |
+
if self.safety_checker is not None:
|
| 230 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
|
| 231 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 232 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 233 |
+
)
|
| 234 |
+
else:
|
| 235 |
+
has_nsfw_concept = None
|
| 236 |
+
return image, has_nsfw_concept
|
| 237 |
+
|
| 238 |
+
def decode_latents(self, latents):
|
| 239 |
+
latents = 1 / 0.18215 * latents
|
| 240 |
+
image = self.vae.decode(latents).sample
|
| 241 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 242 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 243 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 244 |
+
return image
|
| 245 |
+
|
| 246 |
+
def check_inputs(self, prompt, height, width, callback_steps):
|
| 247 |
+
if not isinstance(prompt, str) and not isinstance(prompt, list):
|
| 248 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 249 |
+
|
| 250 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 251 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 252 |
+
|
| 253 |
+
if (callback_steps is None) or (
|
| 254 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 255 |
+
):
|
| 256 |
+
raise ValueError(
|
| 257 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 258 |
+
f" {type(callback_steps)}."
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 262 |
+
shape = (batch_size, num_channels_latents, height // 8, width // 8)
|
| 263 |
+
if latents is None:
|
| 264 |
+
if device.type == "mps":
|
| 265 |
+
# randn does not work reproducibly on mps
|
| 266 |
+
latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
|
| 267 |
+
else:
|
| 268 |
+
latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
|
| 269 |
+
else:
|
| 270 |
+
if latents.shape != shape:
|
| 271 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
| 272 |
+
latents = latents.to(device)
|
| 273 |
+
|
| 274 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 275 |
+
return latents
|
| 276 |
+
|
| 277 |
+
@torch.no_grad()
|
| 278 |
+
def __call__(
|
| 279 |
+
self,
|
| 280 |
+
prompt: Union[str, List[str]],
|
| 281 |
+
height: int = 512,
|
| 282 |
+
width: int = 512,
|
| 283 |
+
num_inference_steps: int = 50,
|
| 284 |
+
guidance_scale: float = 7.5,
|
| 285 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 286 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 287 |
+
eta: float = 0.0,
|
| 288 |
+
generator: Optional[torch.Generator] = None,
|
| 289 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 290 |
+
output_type: Optional[str] = "pil",
|
| 291 |
+
return_dict: bool = True,
|
| 292 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 293 |
+
callback_steps: int = 1,
|
| 294 |
+
**kwargs,
|
| 295 |
+
):
|
| 296 |
+
r"""
|
| 297 |
+
Function invoked when calling the pipeline for generation.
|
| 298 |
+
|
| 299 |
+
Args:
|
| 300 |
+
prompt (`str` or `List[str]`):
|
| 301 |
+
The prompt or prompts to guide the image generation.
|
| 302 |
+
height (`int`, *optional*, defaults to 512):
|
| 303 |
+
The height in pixels of the generated image.
|
| 304 |
+
width (`int`, *optional*, defaults to 512):
|
| 305 |
+
The width in pixels of the generated image.
|
| 306 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 307 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 308 |
+
expense of slower inference.
|
| 309 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 310 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 311 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 312 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 313 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 314 |
+
usually at the expense of lower image quality.
|
| 315 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 316 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 317 |
+
if `guidance_scale` is less than `1`).
|
| 318 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 319 |
+
The number of images to generate per prompt.
|
| 320 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 321 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 322 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 323 |
+
generator (`torch.Generator`, *optional*):
|
| 324 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 325 |
+
deterministic.
|
| 326 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 327 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 328 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 329 |
+
tensor will ge generated by sampling using the supplied random `generator`.
|
| 330 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 331 |
+
The output format of the generate image. Choose between
|
| 332 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 333 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 334 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 335 |
+
plain tuple.
|
| 336 |
+
callback (`Callable`, *optional*):
|
| 337 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 338 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 339 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 340 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 341 |
+
called at every step.
|
| 342 |
+
|
| 343 |
+
Returns:
|
| 344 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 345 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 346 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 347 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 348 |
+
(nsfw) content, according to the `safety_checker`.
|
| 349 |
+
"""
|
| 350 |
+
|
| 351 |
+
# 1. Check inputs. Raise error if not correct
|
| 352 |
+
self.check_inputs(prompt, height, width, callback_steps)
|
| 353 |
+
|
| 354 |
+
# 2. Define call parameters
|
| 355 |
+
batch_size = 1 if isinstance(prompt, str) else len(prompt)
|
| 356 |
+
device = self._execution_device
|
| 357 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 358 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 359 |
+
# corresponds to doing no classifier free guidance.
|
| 360 |
+
do_classifier_free_guidance = True
|
| 361 |
+
if guidance_scale <= 1.0:
|
| 362 |
+
raise ValueError("has to use guidance_scale")
|
| 363 |
+
|
| 364 |
+
# 3. Encode input prompt
|
| 365 |
+
text_embeddings = self._encode_prompt(
|
| 366 |
+
prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
# 4. Prepare timesteps
|
| 370 |
+
self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device)
|
| 371 |
+
sigmas = self.scheduler.sigmas
|
| 372 |
+
sigmas = sigmas.to(text_embeddings.dtype)
|
| 373 |
+
|
| 374 |
+
# 5. Prepare latent variables
|
| 375 |
+
num_channels_latents = self.unet.config.in_channels
|
| 376 |
+
latents = self.prepare_latents(
|
| 377 |
+
batch_size * num_images_per_prompt,
|
| 378 |
+
num_channels_latents,
|
| 379 |
+
height,
|
| 380 |
+
width,
|
| 381 |
+
text_embeddings.dtype,
|
| 382 |
+
device,
|
| 383 |
+
generator,
|
| 384 |
+
latents,
|
| 385 |
+
)
|
| 386 |
+
latents = latents * sigmas[0]
|
| 387 |
+
self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
|
| 388 |
+
self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device)
|
| 389 |
+
|
| 390 |
+
def model_fn(x, t):
|
| 391 |
+
latent_model_input = torch.cat([x] * 2)
|
| 392 |
+
|
| 393 |
+
noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings)
|
| 394 |
+
|
| 395 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 396 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 397 |
+
return noise_pred
|
| 398 |
+
|
| 399 |
+
latents = self.sampler(model_fn, latents, sigmas)
|
| 400 |
+
|
| 401 |
+
# 8. Post-processing
|
| 402 |
+
image = self.decode_latents(latents)
|
| 403 |
+
|
| 404 |
+
# 9. Run safety checker
|
| 405 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
|
| 406 |
+
|
| 407 |
+
# 10. Convert to PIL
|
| 408 |
+
if output_type == "pil":
|
| 409 |
+
image = self.numpy_to_pil(image)
|
| 410 |
+
|
| 411 |
+
if not return_dict:
|
| 412 |
+
return (image, has_nsfw_concept)
|
| 413 |
+
|
| 414 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
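Usage note: the pipeline above exposes k-diffusion's samplers through `set_scheduler`, which looks up a sampling function by name in `k_diffusion.sampling`, and `__call__` always runs with classifier-free guidance. Below is a minimal, illustrative sketch of driving it as a custom pipeline; it assumes this file is available under the community-pipeline name `sd_text2img_k_diffusion`, that the `k-diffusion` package is installed, and that a CUDA device is available. The model id, sampler name, and prompt are examples only, not part of the file itself.

import torch
from diffusers import DiffusionPipeline

# Load Stable Diffusion v1.5 together with this custom pipeline class.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="sd_text2img_k_diffusion",  # assumed community-pipeline name for this file
)
pipe = pipe.to("cuda")

# Select any sampler exposed by `k_diffusion.sampling` by its function name.
pipe.set_scheduler("sample_dpmpp_2m")

# Classifier-free guidance is mandatory in this pipeline, so keep guidance_scale > 1.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(
    "a photograph of an astronaut riding a horse",
    num_inference_steps=25,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("astronaut.png")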