import os
import json
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple

import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange, repeat

from diffusers.models import Transformer2DModel
from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate
from diffusers.models.embeddings import (
    get_1d_sincos_pos_embed_from_grid,
    ImagePositionalEmbeddings,
    CaptionProjection,
    PatchEmbed,
    CombinedTimestepSizeEmbeddings,
    SinusoidalPositionalEmbedding,
)
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.attention import BasicTransformerBlock
from diffusers.models.lora import LoRACompatibleConv, LoRACompatibleLinear
from diffusers.utils.torch_utils import maybe_allow_in_graph
from diffusers.models.normalization import AdaLayerNorm, AdaLayerNormZero
from diffusers.models.attention_processor import Attention
from diffusers.models.activations import GEGLU, GELU, ApproximateGELU
class GatedSelfAttentionDense(nn.Module):
    r"""
    A gated self-attention dense layer that combines visual features and object features.

    Parameters:
        query_dim (`int`): The number of channels in the query.
        context_dim (`int`): The number of channels in the context.
        n_heads (`int`): The number of heads to use for attention.
        d_head (`int`): The number of channels in each head.
    """

    def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
        super().__init__()

        # We need a linear projection since we concatenate the visual features and the object features.
        self.linear = nn.Linear(context_dim, query_dim)

        self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
        self.ff = FeedForward(query_dim, activation_fn="geglu")

        self.norm1 = nn.LayerNorm(query_dim)
        self.norm2 = nn.LayerNorm(query_dim)

        self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
        self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))

        self.enabled = True

    def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
        if not self.enabled:
            return x

        n_visual = x.shape[1]
        objs = self.linear(objs)

        x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
        x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))

        return x
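
# --- Hedged usage sketch (not part of the original Latte code) ---
# Shows the expected tensor shapes for GatedSelfAttentionDense. All sizes below
# are illustrative assumptions. Note the gates start at tanh(0) = 0, so a
# freshly initialized block returns `x` unchanged.
def _example_gated_self_attention():
    block = GatedSelfAttentionDense(query_dim=64, context_dim=32, n_heads=4, d_head=16)
    x = torch.randn(2, 100, 64)    # (batch, visual tokens, query_dim)
    objs = torch.randn(2, 10, 32)  # (batch, object tokens, context_dim)
    out = block(x, objs)           # objects are projected, attended jointly, then gated back
    assert out.shape == x.shape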
class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool`, *optional*, defaults to `False`): Apply a final dropout.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        else:
            raise ValueError(f"Unsupported activation_fn: {activation_fn}")

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(linear_cls(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        compatible_cls = (GEGLU,) if USE_PEFT_BACKEND else (GEGLU, LoRACompatibleLinear)
        for module in self.net:
            if isinstance(module, compatible_cls):
                hidden_states = module(hidden_states, scale)
            else:
                hidden_states = module(hidden_states)
        return hidden_states
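
# --- Hedged usage sketch (illustrative, not from the repo) ---
# The default GEGLU path projects dim -> 2 * inner_dim, gates down to inner_dim,
# then projects back to dim_out, so input and output widths match by default.
def _example_feed_forward():
    ff = FeedForward(dim=64, mult=4, activation_fn="geglu")
    hidden = torch.randn(2, 100, 64)  # (batch, tokens, dim)
    out = ff(hidden)
    assert out.shape == hidden.shape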
@maybe_allow_in_graph
class BasicTransformerBlock_(nn.Module):
    r"""
    A basic Transformer block (temporal variant: self-attention only, no cross-attention).

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Configure if the attentions should contain a bias parameter.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        upcast_attention (`bool`, *optional*):
            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"`, `"ada_norm_zero"` or
            `"ada_norm_single"`.
        final_dropout (`bool`, *optional*, defaults to `False`):
            Whether to apply a final dropout after the last feed-forward layer.
        attention_type (`str`, *optional*, defaults to `"default"`):
            The type of attention to use. Can be `"default"`, `"gated"` or `"gated-text-image"`.
        positional_embeddings (`str`, *optional*, defaults to `None`):
            The type of positional embeddings to apply.
        num_positional_embeddings (`int`, *optional*, defaults to `None`):
            The maximum number of positional embeddings to apply.
    """
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",  # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'
        norm_eps: float = 1e-5,
        final_dropout: bool = False,
        attention_type: str = "default",
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
        self.use_layer_norm = norm_type == "layer_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        if positional_embeddings and (num_positional_embeddings is None):
            raise ValueError(
                "If `positional_embeddings` type is defined, `num_positional_embeddings` must also be defined."
            )

        if positional_embeddings == "sinusoidal":
            self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
        else:
            self.pos_embed = None

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn is intentionally disabled in this temporal block; the original
        # Transformer2DModel branch is kept below for reference.
        # if cross_attention_dim is not None or double_self_attention:
        #     # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
        #     # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
        #     # the second cross attention block.
        #     self.norm2 = (
        #         AdaLayerNorm(dim, num_embeds_ada_norm)
        #         if self.use_ada_layer_norm
        #         else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
        #     )
        #     self.attn2 = Attention(
        #         query_dim=dim,
        #         cross_attention_dim=cross_attention_dim if not double_self_attention else None,
        #         heads=num_attention_heads,
        #         dim_head=attention_head_dim,
        #         dropout=dropout,
        #         bias=attention_bias,
        #         upcast_attention=upcast_attention,
        #     )  # is self-attn if encoder_hidden_states is none
        # else:
        #     self.norm2 = None
        #     self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # 4. Fuser
        if attention_type == "gated" or attention_type == "gated-text-image":
            self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)

        # 5. Scale-shift for PixArt-Alpha.
        if self.use_ada_layer_norm_single:
            self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunked feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Self-Attention
        batch_size = hidden_states.shape[0]

        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        elif self.use_layer_norm:
            norm_hidden_states = self.norm1(hidden_states)
        elif self.use_ada_layer_norm_single:
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
                self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
            ).chunk(6, dim=1)
            norm_hidden_states = self.norm1(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
            norm_hidden_states = norm_hidden_states.squeeze(1)
        else:
            raise ValueError("Incorrect norm used")

        if self.pos_embed is not None:
            norm_hidden_states = self.pos_embed(norm_hidden_states)

        # 1. Retrieve lora scale.
        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0

        # 2. Prepare GLIGEN inputs
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        elif self.use_ada_layer_norm_single:
            attn_output = gate_msa * attn_output

        hidden_states = attn_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        # 2.5 GLIGEN Control
        if gligen_kwargs is not None:
            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])

        # 3. Cross-Attention (disabled in this temporal block; kept for reference)
        # if self.attn2 is not None:
        #     if self.use_ada_layer_norm:
        #         norm_hidden_states = self.norm2(hidden_states, timestep)
        #     elif self.use_ada_layer_norm_zero or self.use_layer_norm:
        #         norm_hidden_states = self.norm2(hidden_states)
        #     elif self.use_ada_layer_norm_single:
        #         # For PixArt norm2 isn't applied here:
        #         # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
        #         norm_hidden_states = hidden_states
        #     else:
        #         raise ValueError("Incorrect norm")
        #
        #     if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
        #         norm_hidden_states = self.pos_embed(norm_hidden_states)
        #
        #     attn_output = self.attn2(
        #         norm_hidden_states,
        #         encoder_hidden_states=encoder_hidden_states,
        #         attention_mask=encoder_attention_mask,
        #         **cross_attention_kwargs,
        #     )
        #     hidden_states = attn_output + hidden_states

        # 4. Feed-forward
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self.use_ada_layer_norm_single:
            norm_hidden_states = self.norm3(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [
                    self.ff(hid_slice, scale=lora_scale)
                    for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)
                ],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states, scale=lora_scale)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        elif self.use_ada_layer_norm_single:
            ff_output = gate_mlp * ff_output

        hidden_states = ff_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states
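
# --- Hedged usage sketch (illustrative, not from the repo) ---
# Runs the temporal-only block with adaLN-single conditioning. The block expects
# hidden states of shape (batch * patches, frames, dim) and a modulation tensor
# of shape (batch * patches, 6 * dim); the sizes below are assumptions.
def _example_temporal_block():
    dim = 64
    block = BasicTransformerBlock_(dim, num_attention_heads=4, attention_head_dim=16, norm_type="ada_norm_single")
    hidden = torch.randn(2, 16, dim)     # (batch * patches, frames, dim)
    timestep = torch.randn(2, 6 * dim)   # adaLN-single shift/scale/gate vector
    out = block(hidden, timestep=timestep)
    assert out.shape == hidden.shape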
class AdaLayerNormSingle(nn.Module):
    r"""
    Norm layer adaptive layer norm single (adaLN-single).

    As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3).

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
        use_additional_conditions (`bool`): Whether to use additional conditions for normalization.
    """

    def __init__(self, embedding_dim: int, use_additional_conditions: bool = False):
        super().__init__()

        self.emb = CombinedTimestepSizeEmbeddings(
            embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions
        )

        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)

    def forward(
        self,
        timestep: torch.Tensor,
        added_cond_kwargs: Dict[str, torch.Tensor] = None,
        batch_size: int = None,
        hidden_dtype: Optional[torch.dtype] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # No modulation happening here; the embedded timestep is projected to the
        # six shift/scale/gate vectors consumed by each transformer block.
        embedded_timestep = self.emb(
            timestep, batch_size=batch_size, hidden_dtype=hidden_dtype, resolution=None, aspect_ratio=None
        )
        return self.linear(self.silu(embedded_timestep)), embedded_timestep
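
# --- Hedged usage sketch (illustrative, not from the repo) ---
# Maps a batch of timesteps to a (batch, 6 * dim) modulation tensor, which the
# blocks later reshape into shift/scale/gate triples for attention and MLP.
def _example_adaln_single():
    dim = 96
    adaln = AdaLayerNormSingle(dim)
    t = torch.randint(0, 1000, (2,))
    modulation, embedded = adaln(t, batch_size=2, hidden_dtype=torch.float32)
    assert modulation.shape == (2, 6 * dim) and embedded.shape == (2, dim)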
@dataclass
class Transformer3DModelOutput(BaseOutput):
    """
    The output of [`LatteT2V`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, num_frames, height, width)`):
            The hidden states output conditioned on the `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor
class LatteT2V(ModelMixin, ConfigMixin):
    """
    A 3D Transformer model for video-like data, built from alternating spatial and temporal blocks.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            The number of channels in the input and output (specify if the input is **continuous**).
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
        sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
            This is fixed during training since it is used to learn a number of position embeddings.
        num_vector_embeds (`int`, *optional*):
            The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. Pass if at least one of the norm_layers is
            `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
            added to the hidden states. During inference, you can denoise for up to but not more steps than
            `num_embeds_ada_norm`.
        attention_bias (`bool`, *optional*):
            Configure if the `TransformerBlocks` attention should contain a bias parameter.
    """

    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        patch_size: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_type: str = "layer_norm",
        norm_elementwise_affine: bool = True,
        norm_eps: float = 1e-5,
        attention_type: str = "default",
        caption_channels: Optional[int] = None,
        video_length: int = 16,
    ):
        super().__init__()
        self.use_linear_projection = use_linear_projection
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.video_length = video_length

        conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
        linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear

        # 1. LatteT2V can process both standard continuous images of shape
        # `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape
        # `(batch_size, num_image_vectors)`. Define whether input is continuous or discrete depending on configuration.
        self.is_input_continuous = (in_channels is not None) and (patch_size is None)
        self.is_input_vectorized = num_vector_embeds is not None
        self.is_input_patches = in_channels is not None and patch_size is not None

        if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
            deprecation_message = (
                f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
                " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config."
                " Please make sure to update the config accordingly, as leaving `norm_type` might lead to incorrect"
                " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it"
                " would be very nice if you could open a Pull request for the `transformer/config.json` file"
            )
            deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
            norm_type = "ada_norm"

        if self.is_input_continuous and self.is_input_vectorized:
            raise ValueError(
                f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
                " sure that either `in_channels` or `num_vector_embeds` is None."
            )
        elif self.is_input_vectorized and self.is_input_patches:
            raise ValueError(
                f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
                " sure that either `num_vector_embeds` or `patch_size` is None."
            )
        elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
            raise ValueError(
                f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or"
                f" `patch_size`: {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `patch_size` is"
                " not None."
            )

        # 2. Define input layers
        if self.is_input_continuous:
            self.in_channels = in_channels

            self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
            if use_linear_projection:
                self.proj_in = linear_cls(in_channels, inner_dim)
            else:
                self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
        elif self.is_input_vectorized:
            assert sample_size is not None, "LatteT2V over discrete input must provide sample_size"
            assert num_vector_embeds is not None, "LatteT2V over discrete input must provide num_vector_embeds"

            self.height = sample_size
            self.width = sample_size
            self.num_vector_embeds = num_vector_embeds
            self.num_latent_pixels = self.height * self.width

            self.latent_image_embedding = ImagePositionalEmbeddings(
                num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width
            )
        elif self.is_input_patches:
            assert sample_size is not None, "LatteT2V over patched input must provide sample_size"

            self.height = sample_size
            self.width = sample_size

            self.patch_size = patch_size
            interpolation_scale = self.config.sample_size // 64  # => 64 (= 512 pixart) has interpolation scale 1
            interpolation_scale = max(interpolation_scale, 1)
            self.pos_embed = PatchEmbed(
                height=sample_size,
                width=sample_size,
                patch_size=patch_size,
                in_channels=in_channels,
                embed_dim=inner_dim,
                interpolation_scale=interpolation_scale,
            )
        # 3. Define spatial transformer blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                    attention_bias=attention_bias,
                    only_cross_attention=only_cross_attention,
                    double_self_attention=double_self_attention,
                    upcast_attention=upcast_attention,
                    norm_type=norm_type,
                    norm_elementwise_affine=norm_elementwise_affine,
                    norm_eps=norm_eps,
                    attention_type=attention_type,
                )
                for d in range(num_layers)
            ]
        )

        # Define temporal transformer blocks (self-attention only)
        self.temporal_transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock_(  # one attention
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=None,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                    attention_bias=attention_bias,
                    only_cross_attention=only_cross_attention,
                    double_self_attention=False,
                    upcast_attention=upcast_attention,
                    norm_type=norm_type,
                    norm_elementwise_affine=norm_elementwise_affine,
                    norm_eps=norm_eps,
                    attention_type=attention_type,
                )
                for d in range(num_layers)
            ]
        )

        # 4. Define output layers
        self.out_channels = in_channels if out_channels is None else out_channels
        if self.is_input_continuous:
            # TODO: should use out_channels for continuous projections
            if use_linear_projection:
                self.proj_out = linear_cls(inner_dim, in_channels)
            else:
                self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
        elif self.is_input_vectorized:
            self.norm_out = nn.LayerNorm(inner_dim)
            self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)
        elif self.is_input_patches and norm_type != "ada_norm_single":
            self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
            self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)
            self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
        elif self.is_input_patches and norm_type == "ada_norm_single":
            self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
            self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)
            self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)

        # 5. PixArt-Alpha blocks.
        self.adaln_single = None
        self.use_additional_conditions = False
        if norm_type == "ada_norm_single":
            self.use_additional_conditions = self.config.sample_size == 128  # False, 128 -> 1024
            # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use
            # additional conditions until we find better name
            self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions)

        self.caption_projection = None
        if caption_channels is not None:
            self.caption_projection = CaptionProjection(in_features=caption_channels, hidden_size=inner_dim)

        self.gradient_checkpointing = False

        # define temporal positional embedding over `video_length` frames
        temp_pos_embed = self.get_1d_sincos_temp_embed(inner_dim, video_length)  # e.g. 1152 hidden size
        self.register_buffer("temp_pos_embed", torch.from_numpy(temp_pos_embed).float().unsqueeze(0), persistent=False)

    def _set_gradient_checkpointing(self, module, value=False):
        self.gradient_checkpointing = value
    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        added_cond_kwargs: Dict[str, torch.Tensor] = None,
        class_labels: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_image_num: int = 0,
        enable_temporal_attentions: bool = True,
        return_dict: bool = True,
    ):
        """
        The [`LatteT2V`] forward method.

        Args:
            hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, frame, height, width)` if continuous):
                Input `hidden_states`.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep (`torch.LongTensor`, *optional*):
                Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
            class_labels (`torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
                Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
                `AdaLayerNormZero`.
            cross_attention_kwargs (`Dict[str, Any]`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            attention_mask (`torch.Tensor`, *optional*):
                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
                is kept, otherwise if `0` it is discarded. The mask will be converted into a bias, which adds large
                negative values to the attention scores corresponding to "discard" tokens.
            encoder_attention_mask (`torch.Tensor`, *optional*):
                Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:

                    * Mask `(batch, sequence_length)` True = keep, False = discard.
                    * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.

                If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
                above. This bias will be added to the cross-attention scores.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`Transformer3DModelOutput`] instead of a plain tuple.

        Returns:
            If `return_dict` is True, a [`Transformer3DModelOutput`] is returned, otherwise a `tuple` where the first
            element is the sample tensor.
        """
        input_batch_size, c, frame, h, w = hidden_states.shape
        frame = frame - use_image_num
        hidden_states = rearrange(hidden_states, 'b c f h w -> (b f) c h w').contiguous()

        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
        #   we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
        #   we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
        # expects mask of shape:
        #   [batch, key_tokens]
        # adds singleton query_tokens dimension:
        #   [batch, 1, key_tokens]
        # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
        #   [batch,  heads, query_tokens, key_tokens] (e.g. torch sdp attn)
        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
        if attention_mask is not None and attention_mask.ndim == 2:
            # assume that mask is expressed as:
            #   (1 = keep,      0 = discard)
            # convert mask into a bias that can be added to attention scores:
            #   (keep = +0,     discard = -10000.0)
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # convert encoder_attention_mask to a bias the same way we do for attention_mask
        if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:  # ndim == 2 means no image joint training
            encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
            encoder_attention_mask = repeat(encoder_attention_mask, 'b 1 l -> (b f) 1 l', f=frame).contiguous()
        elif encoder_attention_mask is not None and encoder_attention_mask.ndim == 3:  # ndim == 3 means image-video joint training
            encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
            encoder_attention_mask_video = encoder_attention_mask[:, :1, ...]
            encoder_attention_mask_video = repeat(
                encoder_attention_mask_video, 'b 1 l -> b (1 f) l', f=frame
            ).contiguous()
            encoder_attention_mask_image = encoder_attention_mask[:, 1:, ...]
            encoder_attention_mask = torch.cat([encoder_attention_mask_video, encoder_attention_mask_image], dim=1)
            encoder_attention_mask = rearrange(encoder_attention_mask, 'b n l -> (b n) l').contiguous().unsqueeze(1)
        # Retrieve lora scale.
        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0

        # 1. Input
        if self.is_input_patches:
            height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
            num_patches = height * width

            hidden_states = self.pos_embed(hidden_states)  # already adds positional embeddings

            if self.adaln_single is not None:
                if self.use_additional_conditions and added_cond_kwargs is None:
                    raise ValueError(
                        "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`."
                    )
                batch_size = input_batch_size
                timestep, embedded_timestep = self.adaln_single(
                    timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
                )

        # 2. Blocks
        if self.caption_projection is not None:
            batch_size = hidden_states.shape[0]
            encoder_hidden_states = self.caption_projection(encoder_hidden_states)  # e.g. (3, 120, 1152)

            if use_image_num != 0 and self.training:
                encoder_hidden_states_video = encoder_hidden_states[:, :1, ...]
                encoder_hidden_states_video = repeat(
                    encoder_hidden_states_video, 'b 1 t d -> b (1 f) t d', f=frame
                ).contiguous()
                encoder_hidden_states_image = encoder_hidden_states[:, 1:, ...]
                encoder_hidden_states = torch.cat([encoder_hidden_states_video, encoder_hidden_states_image], dim=1)
                encoder_hidden_states_spatial = rearrange(encoder_hidden_states, 'b f t d -> (b f) t d').contiguous()
            else:
                encoder_hidden_states_spatial = repeat(encoder_hidden_states, 'b t d -> (b f) t d', f=frame).contiguous()

        # prepare timesteps for the spatial and temporal blocks
        timestep_spatial = repeat(timestep, 'b d -> (b f) d', f=frame + use_image_num).contiguous()
        timestep_temp = repeat(timestep, 'b d -> (b p) d', p=num_patches).contiguous()
        for i, (spatial_block, temp_block) in enumerate(zip(self.transformer_blocks, self.temporal_transformer_blocks)):
            if self.training and self.gradient_checkpointing:
                hidden_states = torch.utils.checkpoint.checkpoint(
                    spatial_block,
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states_spatial,
                    encoder_attention_mask,
                    timestep_spatial,
                    cross_attention_kwargs,
                    class_labels,
                    use_reentrant=False,
                )

                if enable_temporal_attentions:
                    hidden_states = rearrange(hidden_states, '(b f) t d -> (b t) f d', b=input_batch_size).contiguous()

                    if use_image_num != 0:  # image-video joint training
                        hidden_states_video = hidden_states[:, :frame, ...]
                        hidden_states_image = hidden_states[:, frame:, ...]

                        if i == 0:
                            hidden_states_video = hidden_states_video + self.temp_pos_embed

                        hidden_states_video = torch.utils.checkpoint.checkpoint(
                            temp_block,
                            hidden_states_video,
                            None,  # attention_mask
                            None,  # encoder_hidden_states
                            None,  # encoder_attention_mask
                            timestep_temp,
                            cross_attention_kwargs,
                            class_labels,
                            use_reentrant=False,
                        )

                        hidden_states = torch.cat([hidden_states_video, hidden_states_image], dim=1)
                        hidden_states = rearrange(hidden_states, '(b t) f d -> (b f) t d', b=input_batch_size).contiguous()
                    else:
                        if i == 0:
                            hidden_states = hidden_states + self.temp_pos_embed

                        hidden_states = torch.utils.checkpoint.checkpoint(
                            temp_block,
                            hidden_states,
                            None,  # attention_mask
                            None,  # encoder_hidden_states
                            None,  # encoder_attention_mask
                            timestep_temp,
                            cross_attention_kwargs,
                            class_labels,
                            use_reentrant=False,
                        )

                        hidden_states = rearrange(hidden_states, '(b t) f d -> (b f) t d', b=input_batch_size).contiguous()
            else:
                hidden_states = spatial_block(
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states_spatial,
                    encoder_attention_mask,
                    timestep_spatial,
                    cross_attention_kwargs,
                    class_labels,
                )

                if enable_temporal_attentions:
                    hidden_states = rearrange(hidden_states, '(b f) t d -> (b t) f d', b=input_batch_size).contiguous()

                    if use_image_num != 0 and self.training:  # image-video joint training
                        hidden_states_video = hidden_states[:, :frame, ...]
                        hidden_states_image = hidden_states[:, frame:, ...]

                        if i == 0:
                            # mirrors the gradient-checkpointing branch above
                            hidden_states_video = hidden_states_video + self.temp_pos_embed

                        hidden_states_video = temp_block(
                            hidden_states_video,
                            None,  # attention_mask
                            None,  # encoder_hidden_states
                            None,  # encoder_attention_mask
                            timestep_temp,
                            cross_attention_kwargs,
                            class_labels,
                        )

                        hidden_states = torch.cat([hidden_states_video, hidden_states_image], dim=1)
                        hidden_states = rearrange(hidden_states, '(b t) f d -> (b f) t d', b=input_batch_size).contiguous()
                    else:
                        if i == 0:
                            hidden_states = hidden_states + self.temp_pos_embed

                        hidden_states = temp_block(
                            hidden_states,
                            None,  # attention_mask
                            None,  # encoder_hidden_states
                            None,  # encoder_attention_mask
                            timestep_temp,
                            cross_attention_kwargs,
                            class_labels,
                        )

                        hidden_states = rearrange(hidden_states, '(b t) f d -> (b f) t d', b=input_batch_size).contiguous()
        # 3. Output
        if self.is_input_patches:
            if self.config.norm_type != "ada_norm_single":
                conditioning = self.transformer_blocks[0].norm1.emb(
                    timestep, class_labels, hidden_dtype=hidden_states.dtype
                )
                shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
                hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
                hidden_states = self.proj_out_2(hidden_states)
            elif self.config.norm_type == "ada_norm_single":
                embedded_timestep = repeat(embedded_timestep, 'b d -> (b f) d', f=frame + use_image_num).contiguous()
                shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
                hidden_states = self.norm_out(hidden_states)
                # Modulation
                hidden_states = hidden_states * (1 + scale) + shift
                hidden_states = self.proj_out(hidden_states)

            # unpatchify
            if self.adaln_single is None:
                height = width = int(hidden_states.shape[1] ** 0.5)
            hidden_states = hidden_states.reshape(
                shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
            )
            hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
            output = hidden_states.reshape(
                shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
            )

        output = rearrange(output, '(b f) c h w -> b c f h w', b=input_batch_size).contiguous()

        if not return_dict:
            return (output,)

        return Transformer3DModelOutput(sample=output)

    def get_1d_sincos_temp_embed(self, embed_dim, length):
        pos = torch.arange(0, length).unsqueeze(1)
        return get_1d_sincos_pos_embed_from_grid(embed_dim, pos)
    @classmethod
    def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, **kwargs):
        if subfolder is not None:
            pretrained_model_path = os.path.join(pretrained_model_path, subfolder)

        config_file = os.path.join(pretrained_model_path, 'config.json')
        if not os.path.isfile(config_file):
            raise RuntimeError(f"{config_file} does not exist")
        with open(config_file, "r") as f:
            config = json.load(f)

        model = cls.from_config(config, **kwargs)

        # Weight loading is disabled here; the original logic is kept for reference:
        # model_files = [
        #     os.path.join(pretrained_model_path, 'diffusion_pytorch_model.bin'),
        #     os.path.join(pretrained_model_path, 'diffusion_pytorch_model.safetensors'),
        # ]
        # model_file = None
        # for fp in model_files:
        #     if os.path.exists(fp):
        #         model_file = fp
        # if not model_file:
        #     raise RuntimeError(f"{model_file} does not exist")
        # if model_file.split(".")[-1] == "safetensors":
        #     from safetensors import safe_open
        #     state_dict = {}
        #     with safe_open(model_file, framework="pt", device="cpu") as f:
        #         for key in f.keys():
        #             state_dict[key] = f.get_tensor(key)
        # else:
        #     state_dict = torch.load(model_file, map_location="cpu")
        # for k, v in model.state_dict().items():
        #     if 'temporal_transformer_blocks' in k:
        #         state_dict.update({k: v})
        # model.load_state_dict(state_dict)

        return model
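
# --- Hedged end-to-end sketch (illustrative; not part of the original file) ---
# Builds a deliberately tiny LatteT2V and runs one denoising step to show the
# expected input/output layout: latents are (batch, channels, frames, height,
# width). All sizes below are assumptions, and the sketch presumes the pinned
# diffusers version this file targets (one where `BasicTransformerBlock`
# supports `norm_type="ada_norm_single"`).
def _example_latte_t2v():
    model = LatteT2V(
        num_attention_heads=2,
        attention_head_dim=8,          # inner_dim = 16
        in_channels=4,
        out_channels=4,
        num_layers=1,
        cross_attention_dim=16,
        sample_size=8,                 # latent height/width
        patch_size=2,
        norm_type="ada_norm_single",   # required: `timestep` must become a 2D modulation tensor
        caption_channels=16,
        video_length=4,
    ).eval()

    latents = torch.randn(1, 4, 4, 8, 8)      # (b, c, f, h, w)
    timestep = torch.randint(0, 1000, (1,))
    text_embeds = torch.randn(1, 20, 16)      # (b, tokens, caption_channels)

    with torch.no_grad():
        out = model(latents, timestep=timestep, encoder_hidden_states=text_embeds).sample
    assert out.shape == latents.shape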