Spaces: Build error

Pie31415 committed · Commit 71e9a42 · 1 Parent(s): f5e4df7

gigant merge
- __assets__/run.gif +0 -0
- __assets__/run.mp4 +0 -0
- __assets__/walk_01.gif +0 -0
- __assets__/walk_01.mp4 +0 -0
- __assets__/walk_02.gif +0 -0
- __assets__/walk_02.mp4 +0 -0
- __assets__/walk_03.gif +0 -0
- __assets__/walk_03.mp4 +0 -0
- __assets__/walk_04.gif +0 -0
- __assets__/walk_04.mp4 +0 -0
- app.py +1 -1
- text_to_animation/model.py +84 -39
- text_to_animation/models/controlnet_flax.py +12 -29
- text_to_animation/models/cross_frame_attention_flax.py +336 -1
- text_to_animation/models/unet_2d_blocks_flax.py +237 -2
- text_to_animation/models/unet_2d_condition_flax.py +325 -31
- text_to_animation/pipelines/text_to_video_pipeline_flax.py +267 -635
- utils/gradio_utils.py +5 -1
- webui/app_control_animation.py +77 -45
__assets__/run.gif
ADDED
__assets__/run.mp4
ADDED - Binary file (13.2 kB)
__assets__/walk_01.gif
ADDED
__assets__/walk_01.mp4
ADDED - Binary file (33.2 kB)
__assets__/walk_02.gif
ADDED
__assets__/walk_02.mp4
ADDED - Binary file (47.6 kB)
__assets__/walk_03.gif
ADDED
__assets__/walk_03.mp4
ADDED - Binary file (43.2 kB)
__assets__/walk_04.gif
ADDED
__assets__/walk_04.mp4
ADDED - Binary file (64.9 kB)
app.py
CHANGED

@@ -11,7 +11,7 @@ import jax.numpy as jnp
 huggingspace_name = os.environ.get("SPACE_AUTHOR_NAME")
 on_huggingspace = huggingspace_name if huggingspace_name is not None else False

-model = ControlAnimationModel(
+model = ControlAnimationModel(dtype=jnp.float16)

 parser = argparse.ArgumentParser()
 parser.add_argument(
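The only change to app.py is constructing the model wrapper in half precision. A rough sketch of what that dtype choice amounts to for a Flax parameter pytree, assuming the wrapper ultimately holds its weights as such a pytree (the helper and its name below are illustrative, not part of the commit):

import jax
import jax.numpy as jnp

def cast_params_to_half(params):
    # Cast every floating-point leaf of a Flax params pytree to float16,
    # which roughly halves parameter memory at inference time.
    return jax.tree_util.tree_map(
        lambda leaf: leaf.astype(jnp.float16)
        if jnp.issubdtype(leaf.dtype, jnp.floating)
        else leaf,
        params,
    )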
text_to_animation/model.py
CHANGED

@@ -19,10 +19,10 @@ from diffusers import (
     FlaxAutoencoderKL,
     FlaxStableDiffusionControlNetPipeline,
     StableDiffusionPipeline,
-    FlaxUNet2DConditionModel,
+    FlaxUNet2DConditionModel as VanillaFlaxUNet2DConditionModel,
 )
 from text_to_animation.models.unet_2d_condition_flax import (
-    FlaxUNet2DConditionModel
+    FlaxUNet2DConditionModel
 )
 from diffusers import FlaxControlNetModel

@@ -82,10 +82,10 @@ class ControlAnimationModel:
         feature_extractor = CLIPFeatureExtractor.from_pretrained(
             model_id, subfolder="feature_extractor"
         )
-        unet, unet_params =
+        unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
             model_id, subfolder="unet", from_pt=True, dtype=self.dtype
         )
-        unet_vanilla
+        unet_vanilla = VanillaFlaxUNet2DConditionModel.from_config(
             model_id, subfolder="unet", from_pt=True, dtype=self.dtype
         )
         vae, vae_params = FlaxAutoencoderKL.from_pretrained(

@@ -141,8 +141,9 @@ class ControlAnimationModel:

         seeds = [seed for seed in jax.random.randint(self.rng, [num_imgs], 0, 65536)]
         prngs = [jax.random.PRNGKey(seed) for seed in seeds]
+        print(seeds)
         images = self.pipe.generate_starting_frames(
-            params=self.
+            params=self.p_params,
             prngs=prngs,
             controlnet_image=control,
             prompt=prompts,

@@ -153,30 +154,66 @@ class ControlAnimationModel:

         return images

+    def generate_video_from_frame(self, controlnet_video, prompt, seed, neg_prompt=""):
+        # generate a video using the seed provided
+        prng_seed = jax.random.PRNGKey(seed)
+        len_vid = controlnet_video.shape[0]
+        # print(f"Generating video from prompt {'<aardman> style '+ prompt}, with {controlnet_video.shape[0]} frames and prng seed {seed}")
+        added_prompt = "high quality, best quality, HD, clay stop-motion, claymation, HQ, masterpiece, art, smooth"
+        prompts = added_prompt + ", " + prompt
+
+        added_n_prompt = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer difits, cropped, worst quality, low quality, deformed body, bloated, ugly"
+        negative_prompts = added_n_prompt + ", " + neg_prompt
+
+        # prompt_ids = self.pipe.prepare_text_inputs(["aardman style "+ prompt]*len_vid)
+        # n_prompt_ids = self.pipe.prepare_text_inputs([neg_prompt]*len_vid)
+
+        prompt_ids = self.pipe.prepare_text_inputs([prompts]*len_vid)
+        n_prompt_ids = self.pipe.prepare_text_inputs([negative_prompts]*len_vid)
+        prng = replicate_devices(prng_seed) #jax.random.split(prng, jax.device_count())
+        image = replicate_devices(controlnet_video)
+        prompt_ids = replicate_devices(prompt_ids)
+        n_prompt_ids = replicate_devices(n_prompt_ids)
+        motion_field_strength_x = replicate_devices(jnp.array(3))
+        motion_field_strength_y = replicate_devices(jnp.array(4))
+        smooth_bg_strength = replicate_devices(jnp.array(0.8))
+        vid = (self.pipe(image=image,
+                         prompt_ids=prompt_ids,
+                         neg_prompt_ids=n_prompt_ids,
+                         params=self.p_params,
+                         prng_seed=prng,
+                         jit = True,
+                         smooth_bg_strength=smooth_bg_strength,
+                         motion_field_strength_x=motion_field_strength_x,
+                         motion_field_strength_y=motion_field_strength_y,
+                         ).images)[0]
+        return utils.create_gif(np.array(vid), 4, path=None, watermark=None)
+
+
     def generate_animation(
         self,
-        prompt
-        initial_frame_index
-        input_video_path
-        model_link
-        motion_field_strength_x
-        motion_field_strength_y
-        t0
-        t1
-        n_prompt
-        chunk_size
-        video_length
-        merging_ratio
-        seed
-        resolution
-        fps
-        use_cf_attn
-        use_motion_field
-        smooth_bg
-        smooth_bg_strength
-        path
+        prompt, #: str,
+        initial_frame_index, #: int,
+        input_video_path, #: str,
+        model_link = None,#: str = "dreamlike-art/dreamlike-photoreal-2.0",
+        motion_field_strength_x = 12,#: int = 12,
+        motion_field_strength_y= 12,#: int = 12,
+        t0= 44,#: int = 44,
+        t1= 47,#: int = 47,
+        n_prompt= "",#: str = "",
+        chunk_size= 8, #: int = 8,
+        video_length = 8, #: int = 8,
+        merging_ratio = 0., #: float = 0.0,
+        seed= 0,#: int = 0,
+        resolution=512,#: int = 512,
+        fps=2,#: int = 2,
+        use_cf_attn=True,#: bool = True,
+        use_motion_field=True,#: bool = True,
+        smooth_bg=False,#: bool = False,
+        smooth_bg_strength=0.4,#: float = 0.4,
+        path=None,#: str = None,
     ):
-        video_path = gradio_utils.motion_to_video_path(
+        video_path = gradio_utils.motion_to_video_path(input_video_path)

         # added_prompt = 'best quality, HD, clay stop-motion, claymation, HQ, masterpiece, art, smooth'
         # added_prompt = 'high quality, anatomically correct, clay stop-motion, aardman, claymation, smooth'

@@ -187,18 +224,26 @@ class ControlAnimationModel:
             video_path, resolution, None, self.dtype, False, output_fps=4
         )
         control = utils.pre_process_pose(video, apply_pose_detect=False)
-
-
+        len_vid, _, h, w = video.shape
         prng_seed = jax.random.PRNGKey(seed)
-
-
-
-
-
-
-
-
-
-
-
+        prompts = prompt
+        prompt_ids = self.pipe.prepare_text_inputs([prompts]*len_vid)
+        n_prompt_ids = self.pipe.prepare_text_inputs([negative_prompts]*len_vid)
+        prng = replicate_devices(prng_seed) #jax.random.split(prng, jax.device_count())
+        image = replicate_devices(control)
+        prompt_ids = replicate_devices(prompt_ids)
+        n_prompt_ids = replicate_devices(n_prompt_ids)
+        motion_field_strength_x = replicate_devices(jnp.array(motion_field_strength_x))
+        motion_field_strength_y = replicate_devices(jnp.array(motion_field_strength_y))
+        smooth_bg_strength = replicate_devices(jnp.array(smooth_bg_strength))
+        vid = (self.pipe(image=image,
+                         prompt_ids=prompt_ids,
+                         neg_prompt_ids=n_prompt_ids,
+                         params=self.p_params,
+                         prng_seed=prng,
+                         jit = True,
+                         smooth_bg_strength=smooth_bg_strength,
+                         motion_field_strength_x=motion_field_strength_x,
+                         motion_field_strength_y=motion_field_strength_y,
+                         ).images)[0]
         return utils.create_gif(np.array(vid), 4, path=None, watermark=None)
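Both new methods replicate every input (prompt ids, control frames, motion-field strengths, the PRNG key) before calling the pmapped pipeline. A minimal sketch of that pattern, assuming replicate_devices simply adds a leading device axis of size jax.device_count() (the helper below is an illustration, not the repo's utils code):

import jax
import jax.numpy as jnp

def replicate_devices(x):
    # Add a leading axis of length jax.device_count() so a pmapped
    # pipeline sees one copy of the input per device.
    x = jnp.asarray(x)
    return jnp.broadcast_to(x, (jax.device_count(),) + x.shape)

prompt_ids = jnp.zeros((8, 77), dtype=jnp.int32)   # dummy token ids for 8 frames
print(replicate_devices(prompt_ids).shape)          # (n_devices, 8, 77)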
text_to_animation/models/controlnet_flax.py
CHANGED

@@ -23,12 +23,10 @@ from diffusers.configuration_utils import ConfigMixin, flax_register_to_config
 from diffusers.utils import BaseOutput
 from diffusers.models.embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
 from diffusers.models.modeling_flax_utils import FlaxModelMixin
-from
+from .unet_2d_blocks_flax import (
     FlaxCrossAttnDownBlock2D,
-    FlaxCrossAttnUpBlock2D,
     FlaxDownBlock2D,
-
-    FlaxUpBlock2D,
+    FlaxUNetCrossAttnMidBlock2D,
 )

@@ -171,18 +169,14 @@ class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
         sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
         sample = jnp.zeros(sample_shape, dtype=jnp.float32)
         timesteps = jnp.ones((1,), dtype=jnp.int32)
-        encoder_hidden_states = jnp.zeros(
-            (1, 1, self.cross_attention_dim), dtype=jnp.float32
-        )
+        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
         controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
         controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

         params_rng, dropout_rng = jax.random.split(rng)
         rngs = {"params": params_rng, "dropout": dropout_rng}

-        return self.init(
-            rngs, sample, timesteps, encoder_hidden_states, controlnet_cond
-        )["params"]
+        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

     def setup(self):
         block_out_channels = self.block_out_channels

@@ -199,9 +193,7 @@ class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):

         # time
         self.time_proj = FlaxTimesteps(
-            block_out_channels[0],
-            flip_sin_to_cos=self.flip_sin_to_cos,
-            freq_shift=self.config.freq_shift,
+            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
         )
         self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

@@ -290,7 +282,7 @@ class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):

         # mid
         mid_block_channel = block_out_channels[-1]
-        self.mid_block =
+        self.mid_block = FlaxUNetCrossAttnMidBlock2D(
             in_channels=mid_block_channel,
             dropout=self.dropout,
             attn_num_head_channels=attention_head_dim[-1],

@@ -361,23 +353,17 @@ class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
         down_block_res_samples = (sample,)
         for down_block in self.down_blocks:
             if isinstance(down_block, FlaxCrossAttnDownBlock2D):
-                sample, res_samples = down_block(
-                    sample, t_emb, encoder_hidden_states, deterministic=not train
-                )
+                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
             else:
                 sample, res_samples = down_block(sample, t_emb, deterministic=not train)
             down_block_res_samples += res_samples

         # 4. mid
-        sample = self.mid_block(
-            sample, t_emb, encoder_hidden_states, deterministic=not train
-        )
+        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

         # 5. contronet blocks
         controlnet_down_block_res_samples = ()
-        for down_block_res_sample, controlnet_block in zip(
-            down_block_res_samples, self.controlnet_down_blocks
-        ):
+        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
             down_block_res_sample = controlnet_block(down_block_res_sample)
             controlnet_down_block_res_samples += (down_block_res_sample,)

@@ -386,15 +372,12 @@ class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
         mid_block_res_sample = self.controlnet_mid_block(sample)

         # 6. scaling
-        down_block_res_samples = [
-            sample * conditioning_scale for sample in down_block_res_samples
-        ]
+        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
         mid_block_res_sample *= conditioning_scale

         if not return_dict:
             return (down_block_res_samples, mid_block_res_sample)

         return FlaxControlNetOutput(
-            down_block_res_samples=down_block_res_samples,
-
-        )
+            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
+        )
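init_weights above builds dummy sample, timestep, text-embedding, and conditioning tensors and runs self.init once to materialise the parameter pytree. A hedged usage sketch (only init_weights and its PRNG-key argument come from the code above; the sample_size value and reliance on default config fields are assumptions):

import jax

from text_to_animation.models.controlnet_flax import FlaxControlNetModel

controlnet = FlaxControlNetModel(sample_size=64)   # other config fields left at their defaults (assumption)
controlnet_params = controlnet.init_weights(jax.random.PRNGKey(0))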
text_to_animation/models/cross_frame_attention_flax.py
CHANGED

@@ -19,6 +19,8 @@ import flax.linen as nn
 import jax
 import jax.numpy as jnp

+from einops import repeat
+
 # from diffusers.models.attention_flax import FlaxBasicTransformerBlock
 from diffusers.models.attention_flax import FlaxFeedForward, jax_memory_efficient_attention

@@ -32,7 +34,7 @@ def rearrange_4(array):

 class FlaxCrossFrameAttention(nn.Module):
     r"""
-    A Flax multi-head attention module
+    A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762

     Parameters:
         query_dim (:obj:`int`):

@@ -50,6 +52,7 @@ class FlaxCrossFrameAttention(nn.Module):
         batch_size: The number that represents actual batch size, other than the frames.
             For example, using calling unet with a single prompt and num_images_per_prompt=1, batch_size should be
             equal to 2, due to classifier-free guidance.
+
     """
     query_dim: int
     heads: int = 8

@@ -152,6 +155,173 @@ class FlaxCrossFrameAttention(nn.Module):
         hidden_states = self.proj_attn(hidden_states)
         return hidden_states

+class FlaxLoRALinearLayer(nn.Module):
+    out_features: int
+    dtype: jnp.dtype = jnp.float32
+    rank: int=4
+
+    def setup(self):
+        self.down = nn.Dense(self.rank, use_bias=False, kernel_init=nn.initializers.normal(stddev=1 / self.rank), dtype=self.dtype, name="down_lora")
+        self.up = nn.Dense(self.out_features, use_bias=False, kernel_init=nn.initializers.zeros, dtype=self.dtype, name="up_lora")
+
+    def __call__(self, hidden_states):
+        down_hidden_states = self.down(hidden_states)
+        up_hidden_states = self.up(down_hidden_states)
+        return up_hidden_states
+
+class LoRAPositionalEncoding(nn.Module):
+    d_model : int # Hidden dimensionality of the input.
+    rank: int=4
+    dtype: jnp.dtype = jnp.float32
+    max_len : int = 200 # Maximum length of a sequence to expect.
+
+    def setup(self):
+        # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs
+        pe = jnp.zeros((self.max_len, self.d_model), dtype=self.dtype)
+        position = jnp.arange(0, self.max_len, dtype=self.dtype)[:,None]
+        div_term = jnp.exp(jnp.arange(0, self.d_model, 2) * (-jnp.log(10000.0) / self.d_model))
+        pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))
+        pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))
+        self.pe = pe
+        self.lora_pe = FlaxLoRALinearLayer(self.d_model, rank=self.rank, dtype=self.dtype)
+
+    def __call__(self, x):
+        #x is (F // f, f, D, C)
+        b, f, d, c = x.shape
+        pe = repeat(self.lora_pe(self.pe[:f]), 'f c -> b f d c', b=b, d=d)
+        return x + pe
+
+class FlaxLoRACrossFrameAttention(nn.Module):
+    r"""
+    A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762
+
+    Parameters:
+        query_dim (:obj:`int`):
+            Input hidden states dimension
+        heads (:obj:`int`, *optional*, defaults to 8):
+            Number of heads
+        dim_head (:obj:`int`, *optional*, defaults to 64):
+            Hidden states dimension inside each head
+        dropout (:obj:`float`, *optional*, defaults to 0.0):
+            Dropout rate
+        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
+            enable memory efficient attention https://arxiv.org/abs/2112.05682
+        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
+            Parameters `dtype`
+        batch_size: The number that represents actual batch size, other than the frames.
+            For example, using calling unet with a single prompt and num_images_per_prompt=1, batch_size should be
+            equal to 2, due to classifier-free guidance.
+
+    """
+    query_dim: int
+    heads: int = 8
+    dim_head: int = 64
+    dropout: float = 0.0
+    use_memory_efficient_attention: bool = False
+    dtype: jnp.dtype = jnp.float32
+    batch_size : int = 2
+    rank: int=4
+
+    def setup(self):
+        inner_dim = self.dim_head * self.heads
+        self.scale = self.dim_head**-0.5
+
+        # Weights were exported with old names {to_q, to_k, to_v, to_out}
+        self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q")
+        self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k")
+        self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v")
+
+        self.add_k_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype)
+        self.add_v_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype)
+
+        self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0")
+
+        self.to_q_lora = FlaxLoRALinearLayer(inner_dim, rank=self.rank, dtype=self.dtype)
+        self.to_k_lora = FlaxLoRALinearLayer(inner_dim, rank=self.rank, dtype=self.dtype)
+        self.to_v_lora = FlaxLoRALinearLayer(inner_dim, rank=self.rank, dtype=self.dtype)
+        self.to_out_lora = FlaxLoRALinearLayer(inner_dim, rank=self.rank, dtype=self.dtype)
+
+    def reshape_heads_to_batch_dim(self, tensor):
+        batch_size, seq_len, dim = tensor.shape
+        head_size = self.heads
+        tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
+        tensor = jnp.transpose(tensor, (0, 2, 1, 3))
+        tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
+        return tensor
+
+    def reshape_batch_dim_to_heads(self, tensor):
+        batch_size, seq_len, dim = tensor.shape
+        head_size = self.heads
+        tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
+        tensor = jnp.transpose(tensor, (0, 2, 1, 3))
+        tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size)
+        return tensor
+
+    def __call__(self, hidden_states, context=None, deterministic=True, scale=1.):
+        is_cross_attention = context is not None
+        context = hidden_states if context is None else context
+        query_proj = self.query(hidden_states) + scale * self.to_q_lora(hidden_states)
+        key_proj = self.key(context) + scale * self.to_k_lora(context)
+        value_proj = self.value(context) + scale * self.to_v_lora(context)
+
+        # Sparse Attention
+        if not is_cross_attention:
+            video_length = 1 if key_proj.shape[0] < self.batch_size else key_proj.shape[0] // self.batch_size
+            first_frame_index = [0] * video_length
+            #first frame ==> previous frame
+            previous_frame_index = jnp.array([0] + list(range(video_length - 1)))
+
+            # rearrange keys to have batch and frames in the 1st and 2nd dims respectively
+            key_proj = rearrange_3(key_proj, video_length)
+            key_proj = key_proj[:, first_frame_index]
+            # rearrange values to have batch and frames in the 1st and 2nd dims respectively
+            value_proj = rearrange_3(value_proj, video_length)
+            value_proj = value_proj[:, first_frame_index]
+
+            # rearrange back to original shape
+            key_proj = rearrange_4(key_proj)
+            value_proj = rearrange_4(value_proj)
+
+        query_states = self.reshape_heads_to_batch_dim(query_proj)
+        key_states = self.reshape_heads_to_batch_dim(key_proj)
+        value_states = self.reshape_heads_to_batch_dim(value_proj)
+
+        if self.use_memory_efficient_attention:
+            query_states = query_states.transpose(1, 0, 2)
+            key_states = key_states.transpose(1, 0, 2)
+            value_states = value_states.transpose(1, 0, 2)
+
+            # this if statement create a chunk size for each layer of the unet
+            # the chunk size is equal to the query_length dimension of the deepest layer of the unet
+
+            flatten_latent_dim = query_states.shape[-3]
+            if flatten_latent_dim % 64 == 0:
+                query_chunk_size = int(flatten_latent_dim / 64)
+            elif flatten_latent_dim % 16 == 0:
+                query_chunk_size = int(flatten_latent_dim / 16)
+            elif flatten_latent_dim % 4 == 0:
+                query_chunk_size = int(flatten_latent_dim / 4)
+            else:
+                query_chunk_size = int(flatten_latent_dim)
+
+            hidden_states = jax_memory_efficient_attention(
+                query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4
+            )
+
+            hidden_states = hidden_states.transpose(1, 0, 2)
+        else:
+            # compute attentions
+            attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states)
+            attention_scores = attention_scores * self.scale
+            attention_probs = nn.softmax(attention_scores, axis=2)
+
+            # attend to values
+            hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states)
+
+        hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+        hidden_states = self.proj_attn(hidden_states) + scale * self.to_out_lora(hidden_states)
+        return hidden_states
+
 class FlaxBasicTransformerBlock(nn.Module):
     r"""
     A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:

@@ -222,6 +392,76 @@ class FlaxBasicTransformerBlock(nn.Module):

         return hidden_states

+class FlaxLoRABasicTransformerBlock(nn.Module):
+    r"""
+    A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:
+    https://arxiv.org/abs/1706.03762
+
+
+    Parameters:
+        dim (:obj:`int`):
+            Inner hidden states dimension
+        n_heads (:obj:`int`):
+            Number of heads
+        d_head (:obj:`int`):
+            Hidden states dimension inside each head
+        dropout (:obj:`float`, *optional*, defaults to 0.0):
+            Dropout rate
+        only_cross_attention (`bool`, defaults to `False`):
+            Whether to only apply cross attention.
+        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
+            Parameters `dtype`
+        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
+            enable memory efficient attention https://arxiv.org/abs/2112.05682
+    """
+    dim: int
+    n_heads: int
+    d_head: int
+    dropout: float = 0.0
+    only_cross_attention: bool = False
+    dtype: jnp.dtype = jnp.float32
+    use_memory_efficient_attention: bool = False
+
+    def setup(self):
+
+        # self attention (or cross_attention if only_cross_attention is True)
+        self.attn1 = FlaxLoRACrossFrameAttention(
+            self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, dtype=self.dtype,
+        )
+        # cross attention
+        self.attn2 = FlaxLoRACrossFrameAttention(
+            self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, dtype=self.dtype,
+        )
+        self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype)
+        self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
+        self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
+        self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
+
+
+    def __call__(self, hidden_states, context, deterministic=True, scale=1.):
+        # self attention
+        residual = hidden_states
+
+        if self.only_cross_attention:
+            hidden_states = self.attn1(self.norm1(hidden_states), context, deterministic=deterministic, scale=scale)
+        else:
+            hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic, scale=scale)
+        hidden_states = hidden_states + residual
+
+        # cross attention
+        residual = hidden_states
+
+        hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic, scale=scale)
+
+        hidden_states = hidden_states + residual
+
+        # feed forward
+        residual = hidden_states
+        hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic)
+        hidden_states = hidden_states + residual
+
+        return hidden_states
+
 class FlaxCrossFrameTransformer2DModel(nn.Module):
     r"""

@@ -320,4 +560,99 @@ class FlaxCrossFrameTransformer2DModel(nn.Module):
         hidden_states = hidden_states + residual
         return hidden_states

+class FlaxLoRACrossFrameTransformer2DModel(nn.Module):
+    r"""
+    A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in:
+    https://arxiv.org/pdf/1506.02025.pdf
+
+
+    Parameters:
+        in_channels (:obj:`int`):
+            Input number of channels
+        n_heads (:obj:`int`):
+            Number of heads
+        d_head (:obj:`int`):
+            Hidden states dimension inside each head
+        depth (:obj:`int`, *optional*, defaults to 1):
+            Number of transformers block
+        dropout (:obj:`float`, *optional*, defaults to 0.0):
+            Dropout rate
+        use_linear_projection (`bool`, defaults to `False`): tbd
+        only_cross_attention (`bool`, defaults to `False`): tbd
+        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
+            Parameters `dtype`
+        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
+            enable memory efficient attention https://arxiv.org/abs/2112.05682
+    """
+    in_channels: int
+    n_heads: int
+    d_head: int
+    depth: int = 1
+    dropout: float = 0.0
+    use_linear_projection: bool = False
+    only_cross_attention: bool = False
+    dtype: jnp.dtype = jnp.float32
+    use_memory_efficient_attention: bool = False
+
+    def setup(self):
+        self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5)
+
+        inner_dim = self.n_heads * self.d_head
+        if self.use_linear_projection:
+            self.proj_in = nn.Dense(inner_dim, dtype=self.dtype)
+        else:
+            self.proj_in = nn.Conv(
+                inner_dim,
+                kernel_size=(1, 1),
+                strides=(1, 1),
+                padding="VALID",
+                dtype=self.dtype,
+            )
+
+        self.transformer_blocks = [
+            FlaxLoRABasicTransformerBlock(
+                inner_dim,
+                self.n_heads,
+                self.d_head,
+                dropout=self.dropout,
+                only_cross_attention=self.only_cross_attention,
+                dtype=self.dtype,
+                use_memory_efficient_attention=self.use_memory_efficient_attention,
+            )
+            for _ in range(self.depth)
+        ]
+
+        if self.use_linear_projection:
+            self.proj_out = nn.Dense(inner_dim, dtype=self.dtype)
+        else:
+            self.proj_out = nn.Conv(
+                inner_dim,
+                kernel_size=(1, 1),
+                strides=(1, 1),
+                padding="VALID",
+                dtype=self.dtype,
+            )
+
+    def __call__(self, hidden_states, context, deterministic=True, scale=1.0):
+        batch, height, width, channels = hidden_states.shape
+        residual = hidden_states
+        hidden_states = self.norm(hidden_states)
+        if self.use_linear_projection:
+            hidden_states = hidden_states.reshape(batch, height * width, channels)
+            hidden_states = self.proj_in(hidden_states)
+        else:
+            hidden_states = self.proj_in(hidden_states)
+            hidden_states = hidden_states.reshape(batch, height * width, channels)
+
+        for transformer_block in self.transformer_blocks:
+            hidden_states = transformer_block(hidden_states, context, deterministic=deterministic, scale=scale)
+
+        if self.use_linear_projection:
+            hidden_states = self.proj_out(hidden_states)
+            hidden_states = hidden_states.reshape(batch, height, width, channels)
+        else:
+            hidden_states = hidden_states.reshape(batch, height, width, channels)
+            hidden_states = self.proj_out(hidden_states)
+
+        hidden_states = hidden_states + residual
+        return hidden_states
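FlaxLoRALinearLayer above is the standard LoRA construction: a rank-r down-projection followed by a zero-initialised up-projection, so the adapter starts as a no-op and its output is added to the frozen projection, scaled at call time. A self-contained restatement of that idea in plain JAX (illustrative only, not the repo's module):

import jax
import jax.numpy as jnp

rank, d_model = 4, 320
key_w, key_down = jax.random.split(jax.random.PRNGKey(0))

W = 0.02 * jax.random.normal(key_w, (d_model, d_model))      # frozen base projection
down = jax.random.normal(key_down, (d_model, rank)) / rank   # trainable down-projection
up = jnp.zeros((rank, d_model))                               # zero-init up-projection: the update starts as a no-op

def lora_dense(x, scale=1.0):
    # y = W x + scale * up(down(x)), the same composition the LoRA-wrapped
    # query/key/value/out projections in FlaxLoRACrossFrameAttention use
    return x @ W + scale * ((x @ down) @ up)

x = jnp.ones((2, 77, d_model))
print(lora_dense(x).shape)   # (2, 77, 320)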
text_to_animation/models/unet_2d_blocks_flax.py
CHANGED

@@ -17,7 +17,7 @@ import jax.numpy as jnp

 # from diffusers.models.attention_flax import FlaxTransformer2DModel
 from diffusers.models.resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
-from .cross_frame_attention_flax import FlaxCrossFrameTransformer2DModel
+from .cross_frame_attention_flax import FlaxCrossFrameTransformer2DModel, FlaxLoRACrossFrameTransformer2DModel

 class FlaxCrossAttnDownBlock2D(nn.Module):
     r"""

@@ -100,6 +100,87 @@ class FlaxCrossAttnDownBlock2D(nn.Module):
         return hidden_states, output_states


+class FlaxLoRACrossAttnDownBlock2D(nn.Module):
+    r"""
+    Cross Attention 2D Downsizing block - original architecture from Unet transformers:
+    https://arxiv.org/abs/2103.06104
+    Parameters:
+        in_channels (:obj:`int`):
+            Input channels
+        out_channels (:obj:`int`):
+            Output channels
+        dropout (:obj:`float`, *optional*, defaults to 0.0):
+            Dropout rate
+        num_layers (:obj:`int`, *optional*, defaults to 1):
+            Number of attention blocks layers
+        attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):
+            Number of attention heads of each spatial transformer block
+        add_downsample (:obj:`bool`, *optional*, defaults to `True`):
+            Whether to add downsampling layer before each final output
+        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
+            enable memory efficient attention https://arxiv.org/abs/2112.05682
+        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
+            Parameters `dtype`
+    """
+    in_channels: int
+    out_channels: int
+    dropout: float = 0.0
+    num_layers: int = 1
+    attn_num_head_channels: int = 1
+    add_downsample: bool = True
+    use_linear_projection: bool = False
+    only_cross_attention: bool = False
+    use_memory_efficient_attention: bool = False
+    dtype: jnp.dtype = jnp.float32
+
+    def setup(self):
+        resnets = []
+        attentions = []
+
+        for i in range(self.num_layers):
+            in_channels = self.in_channels if i == 0 else self.out_channels
+
+            res_block = FlaxResnetBlock2D(
+                in_channels=in_channels,
+                out_channels=self.out_channels,
+                dropout_prob=self.dropout,
+                dtype=self.dtype,
+            )
+            resnets.append(res_block)
+
+            attn_block = FlaxLoRACrossFrameTransformer2DModel(
+                in_channels=self.out_channels,
+                n_heads=self.attn_num_head_channels,
+                d_head=self.out_channels // self.attn_num_head_channels,
+                depth=1,
+                use_linear_projection=self.use_linear_projection,
+                only_cross_attention=self.only_cross_attention,
+                use_memory_efficient_attention=self.use_memory_efficient_attention,
+                dtype=self.dtype,
+            )
+            attentions.append(attn_block)
+
+        self.resnets = resnets
+        self.attentions = attentions
+
+        if self.add_downsample:
+            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
+
+    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True, scale=1.):
+        output_states = ()
+
+        for resnet, attn in zip(self.resnets, self.attentions):
+            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
+            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic, scale=scale)
+            output_states += (hidden_states,)
+
+        if self.add_downsample:
+            hidden_states = self.downsamplers_0(hidden_states)
+            output_states += (hidden_states,)
+
+        return hidden_states, output_states
+
+
 class FlaxDownBlock2D(nn.Module):
     r"""
     Flax 2D downsizing block

@@ -240,6 +321,90 @@ class FlaxCrossAttnUpBlock2D(nn.Module):
         return hidden_states


+class FlaxLoRACrossAttnUpBlock2D(nn.Module):
+    r"""
+    Cross Attention 2D Upsampling block - original architecture from Unet transformers:
+    https://arxiv.org/abs/2103.06104
+    Parameters:
+        in_channels (:obj:`int`):
+            Input channels
+        out_channels (:obj:`int`):
+            Output channels
+        dropout (:obj:`float`, *optional*, defaults to 0.0):
+            Dropout rate
+        num_layers (:obj:`int`, *optional*, defaults to 1):
+            Number of attention blocks layers
+        attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):
+            Number of attention heads of each spatial transformer block
+        add_upsample (:obj:`bool`, *optional*, defaults to `True`):
+            Whether to add upsampling layer before each final output
+        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
+            enable memory efficient attention https://arxiv.org/abs/2112.05682
+        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
+            Parameters `dtype`
+    """
+    in_channels: int
+    out_channels: int
+    prev_output_channel: int
+    dropout: float = 0.0
+    num_layers: int = 1
+    attn_num_head_channels: int = 1
+    add_upsample: bool = True
+    use_linear_projection: bool = False
+    only_cross_attention: bool = False
+    use_memory_efficient_attention: bool = False
+    dtype: jnp.dtype = jnp.float32
+
+    def setup(self):
+        resnets = []
+        attentions = []
+
+        for i in range(self.num_layers):
+            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
+            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
+
+            res_block = FlaxResnetBlock2D(
+                in_channels=resnet_in_channels + res_skip_channels,
+                out_channels=self.out_channels,
+                dropout_prob=self.dropout,
+                dtype=self.dtype,
+            )
+            resnets.append(res_block)
+
+            attn_block = FlaxLoRACrossFrameTransformer2DModel(
+                in_channels=self.out_channels,
+                n_heads=self.attn_num_head_channels,
+                d_head=self.out_channels // self.attn_num_head_channels,
+                depth=1,
+                use_linear_projection=self.use_linear_projection,
+                only_cross_attention=self.only_cross_attention,
+                use_memory_efficient_attention=self.use_memory_efficient_attention,
+                dtype=self.dtype,
+            )
+            attentions.append(attn_block)
+
+        self.resnets = resnets
+        self.attentions = attentions
+
+        if self.add_upsample:
+            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
+
+    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True, scale=1.):
+        for resnet, attn in zip(self.resnets, self.attentions):
+            # pop res hidden states
+            res_hidden_states = res_hidden_states_tuple[-1]
+            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
+            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
+
+            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
+            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic, scale=scale)
+
+        if self.add_upsample:
+            hidden_states = self.upsamplers_0(hidden_states)
+
+        return hidden_states
+
+
 class FlaxUpBlock2D(nn.Module):
     r"""
     Flax 2D upsampling block

@@ -302,7 +467,7 @@ class FlaxUpBlock2D(nn.Module):
         return hidden_states


-class
+class FlaxUNetCrossAttnMidBlock2D(nn.Module):
     r"""
     Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104
     Parameters:

@@ -369,4 +534,74 @@ class FlaxUNetMidBlock2DCrossAttn(nn.Module):
             hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
             hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

+        return hidden_states
+
+
+class FlaxLoRAUNetCrossAttnMidBlock2D(nn.Module):
+    r"""
+    Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104
+    Parameters:
+        in_channels (:obj:`int`):
+            Input channels
+        dropout (:obj:`float`, *optional*, defaults to 0.0):
+            Dropout rate
+        num_layers (:obj:`int`, *optional*, defaults to 1):
+            Number of attention blocks layers
+        attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):
+            Number of attention heads of each spatial transformer block
+        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
+            enable memory efficient attention https://arxiv.org/abs/2112.05682
+        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
+            Parameters `dtype`
+    """
+    in_channels: int
+    dropout: float = 0.0
+    num_layers: int = 1
+    attn_num_head_channels: int = 1
+    use_linear_projection: bool = False
+    use_memory_efficient_attention: bool = False
+    dtype: jnp.dtype = jnp.float32
+
+    def setup(self):
+        # there is always at least one resnet
+        resnets = [
+            FlaxResnetBlock2D(
+                in_channels=self.in_channels,
+                out_channels=self.in_channels,
+                dropout_prob=self.dropout,
+                dtype=self.dtype,
+            )
+        ]
+
+        attentions = []
+
+        for _ in range(self.num_layers):
+            attn_block = FlaxLoRACrossFrameTransformer2DModel(
+                in_channels=self.in_channels,
+                n_heads=self.attn_num_head_channels,
+                d_head=self.in_channels // self.attn_num_head_channels,
+                depth=1,
+                use_linear_projection=self.use_linear_projection,
+                use_memory_efficient_attention=self.use_memory_efficient_attention,
+                dtype=self.dtype,
+            )
+            attentions.append(attn_block)
+
+            res_block = FlaxResnetBlock2D(
+                in_channels=self.in_channels,
+                out_channels=self.in_channels,
+                dropout_prob=self.dropout,
+                dtype=self.dtype,
+            )
+            resnets.append(res_block)
+
+        self.resnets = resnets
+        self.attentions = attentions
+
+    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True, scale=1.):
+        hidden_states = self.resnets[0](hidden_states, temb)
+        for attn, resnet in zip(self.attentions, self.resnets[1:]):
+            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic, scale=scale)
+            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
+
         return hidden_states
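A hypothetical smoke test for the new FlaxLoRACrossAttnDownBlock2D (not part of the commit; the channel sizes, time-embedding width, and context shape are assumptions chosen so the head and group-norm arithmetic works out):

import jax
import jax.numpy as jnp

from text_to_animation.models.unet_2d_blocks_flax import FlaxLoRACrossAttnDownBlock2D

block = FlaxLoRACrossAttnDownBlock2D(in_channels=32, out_channels=32, attn_num_head_channels=8)
hidden = jnp.zeros((2, 16, 16, 32))    # (batch, height, width, channels) - Flax blocks are channels-last
temb = jnp.zeros((2, 128))             # time embedding
context = jnp.zeros((2, 77, 768))      # encoder hidden states, e.g. CLIP text features
variables = block.init(jax.random.PRNGKey(0), hidden, temb, context)
out, skip_states = block.apply(variables, hidden, temb, context)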
text_to_animation/models/unet_2d_condition_flax.py
CHANGED
@@ -26,15 +26,17 @@ from diffusers.configuration_utils import ConfigMixin, flax_register_to_config
 from diffusers.utils import BaseOutput
 from diffusers.models.embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
 from diffusers.models.modeling_flax_utils import FlaxModelMixin
-from
     FlaxCrossAttnDownBlock2D,
     FlaxCrossAttnUpBlock2D,
     FlaxDownBlock2D,
-    FlaxUNetMidBlock2DCrossAttn,
     FlaxUpBlock2D,
 )

-
 @flax.struct.dataclass
 class FlaxUNet2DConditionOutput(BaseOutput):
     """

@@ -105,12 +107,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
         "CrossAttnDownBlock2D",
         "DownBlock2D",
     )
-    up_block_types: Tuple[str] = (
-        "UpBlock2D",
-        "CrossAttnUpBlock2D",
-        "CrossAttnUpBlock2D",
-        "CrossAttnUpBlock2D",
-    )
     only_cross_attention: Union[bool, Tuple[bool]] = False
     block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
     layers_per_block: int = 2

@@ -118,7 +115,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
     cross_attention_dim: int = 1280
     dropout: float = 0.0
     use_linear_projection: bool = False
-    dtype: jnp.dtype = jnp.
     flip_sin_to_cos: bool = True
     freq_shift: int = 0
     use_memory_efficient_attention: bool = False

@@ -126,11 +123,9 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
     def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
         # init input tensors
         sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
-        sample = jnp.zeros(sample_shape, dtype=
         timesteps = jnp.ones((1,), dtype=jnp.int32)
-        encoder_hidden_states = jnp.zeros(
-            (1, 1, self.cross_attention_dim), dtype=jnp.float32
-        )

         params_rng, dropout_rng = jax.random.split(rng)
         rngs = {"params": params_rng, "dropout": dropout_rng}

@@ -152,9 +147,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):

         # time
         self.time_proj = FlaxTimesteps(
-            block_out_channels[0],
-            flip_sin_to_cos=self.flip_sin_to_cos,
-            freq_shift=self.config.freq_shift,
         )
         self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

@@ -201,7 +194,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
         self.down_blocks = down_blocks

         # mid
-        self.mid_block =
             in_channels=block_out_channels[-1],
             dropout=self.dropout,
             attn_num_head_channels=attention_head_dim[-1],

@@ -219,9 +212,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
         for i, up_block_type in enumerate(self.up_block_types):
             prev_output_channel = output_channel
             output_channel = reversed_block_out_channels[i]
-            input_channel = reversed_block_out_channels[
-                min(i + 1, len(block_out_channels) - 1)
-            ]

             is_final_block = i == len(block_out_channels) - 1

@@ -308,9 +299,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
         down_block_res_samples = (sample,)
         for down_block in self.down_blocks:
             if isinstance(down_block, FlaxCrossAttnDownBlock2D):
-                sample, res_samples = down_block(
-                    sample, t_emb, encoder_hidden_states, deterministic=not train
-                )
             else:
                 sample, res_samples = down_block(sample, t_emb, deterministic=not train)
             down_block_res_samples += res_samples

@@ -327,9 +316,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
         down_block_res_samples = new_down_block_res_samples

         # 4. mid
-        sample = self.mid_block(
-            sample, t_emb, encoder_hidden_states, deterministic=not train
-        )

         if mid_block_additional_residual is not None:
             sample += mid_block_additional_residual

@@ -337,9 +324,7 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
         # 5. up
         for up_block in self.up_blocks:
             res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
-            down_block_res_samples = down_block_res_samples[
-                : -(self.layers_per_block + 1)
-            ]
             if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                 sample = up_block(
                     sample,

@@ -349,12 +334,321 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
                     deterministic=not train,
                 )
             else:
                 sample = up_block(
                     sample,
                     temb=t_emb,
                     res_hidden_states_tuple=res_samples,
                     deterministic=not train,
                 )

         # 6. post-process
         sample = self.conv_norm_out(sample)

@@ -365,4 +659,4 @@ class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
         if not return_dict:
             return (sample,)

-        return FlaxUNet2DConditionOutput(sample=sample)
 from diffusers.utils import BaseOutput
 from diffusers.models.embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
 from diffusers.models.modeling_flax_utils import FlaxModelMixin
+from .unet_2d_blocks_flax import (
     FlaxCrossAttnDownBlock2D,
     FlaxCrossAttnUpBlock2D,
+    FlaxUNetCrossAttnMidBlock2D,
+    FlaxLoRACrossAttnDownBlock2D,
+    FlaxLoRACrossAttnUpBlock2D,
+    FlaxLoRAUNetCrossAttnMidBlock2D,
     FlaxDownBlock2D,
     FlaxUpBlock2D,
 )

 @flax.struct.dataclass
 class FlaxUNet2DConditionOutput(BaseOutput):
     """

         "CrossAttnDownBlock2D",
         "DownBlock2D",
     )
+    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
     only_cross_attention: Union[bool, Tuple[bool]] = False
     block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
     layers_per_block: int = 2

     cross_attention_dim: int = 1280
     dropout: float = 0.0
     use_linear_projection: bool = False
+    dtype: jnp.dtype = jnp.float16
     flip_sin_to_cos: bool = True
     freq_shift: int = 0
     use_memory_efficient_attention: bool = False

     def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
         # init input tensors
         sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
+        sample = jnp.zeros(sample_shape, dtype=self.dtype)
         timesteps = jnp.ones((1,), dtype=jnp.int32)
+        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=self.dtype)

         params_rng, dropout_rng = jax.random.split(rng)
         rngs = {"params": params_rng, "dropout": dropout_rng}

         # time
         self.time_proj = FlaxTimesteps(
+            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
         )
         self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

         self.down_blocks = down_blocks

         # mid
+        self.mid_block = FlaxUNetCrossAttnMidBlock2D(
             in_channels=block_out_channels[-1],
             dropout=self.dropout,
             attn_num_head_channels=attention_head_dim[-1],

         for i, up_block_type in enumerate(self.up_block_types):
             prev_output_channel = output_channel
             output_channel = reversed_block_out_channels[i]
+            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

             is_final_block = i == len(block_out_channels) - 1

         down_block_res_samples = (sample,)
         for down_block in self.down_blocks:
             if isinstance(down_block, FlaxCrossAttnDownBlock2D):
+                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
             else:
                 sample, res_samples = down_block(sample, t_emb, deterministic=not train)
             down_block_res_samples += res_samples

         down_block_res_samples = new_down_block_res_samples

         # 4. mid
+        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

         if mid_block_additional_residual is not None:
             sample += mid_block_additional_residual

         # 5. up
         for up_block in self.up_blocks:
             res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
+            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
             if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                 sample = up_block(
                     sample,

                     deterministic=not train,
                 )
             else:
+                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
+
+        # 6. post-process
+        sample = self.conv_norm_out(sample)
+        sample = nn.silu(sample)
+        sample = self.conv_out(sample)
+        sample = jnp.transpose(sample, (0, 3, 1, 2))
+
+        if not return_dict:
+            return (sample,)
+
+        return FlaxUNet2DConditionOutput(sample=sample)
+
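A minimal usage sketch (not part of the commit; the class name is the one above, but the argument values are only illustrative): `init_weights` builds dummy `sample`, `timesteps` and `encoder_hidden_states` tensors and calls `self.init`, so parameters can be created from a single PRNG key, for example:

import jax
import jax.numpy as jnp

# hypothetical instantiation; sample_size and dtype values are examples only
unet = FlaxUNet2DConditionModel(sample_size=64, dtype=jnp.float16)
unet_params = unet.init_weights(jax.random.PRNGKey(0))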
+@flax_register_to_config
+class FlaxLoRAUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
+    r"""
+
+    FlaxLoRAUNet2DConditionModel is a custom FlaxUNet2DConditionModel with a few tweaks:
+    - Cross Attention is replaced by Cross-Frame Attention
+    - Low Rank Adaptation (LoRA) layers are added to the Cross-Frame Attention
+    - A frame positional encoding is added to the encoder_hidden_states via a LoRA linear layer
+
+    FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a
+    timestep and returns sample shaped output.
+
+    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the library
+    implements for all the models (such as downloading or saving, etc.)
+
+    Also, this model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
+    subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
+    general usage and behavior.
+
+    Finally, this model supports inherent JAX features such as:
+    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+    Parameters:
+        sample_size (`int`, *optional*):
+            The size of the input sample.
+        in_channels (`int`, *optional*, defaults to 4):
+            The number of channels in the input sample.
+        out_channels (`int`, *optional*, defaults to 4):
+            The number of channels in the output.
+        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
+            The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D",
+            "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D"
+        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
+            The tuple of upsample blocks to use. The corresponding class names will be: "FlaxUpBlock2D",
+            "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D"
+        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
+            The tuple of output channels for each block.
+        layers_per_block (`int`, *optional*, defaults to 2):
+            The number of layers per block.
+        attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
+            The dimension of the attention heads.
+        cross_attention_dim (`int`, *optional*, defaults to 768):
+            The dimension of the cross attention features.
+        dropout (`float`, *optional*, defaults to 0):
+            Dropout probability for down, up and bottleneck blocks.
+        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
+            Whether to flip the sin to cos in the time embedding.
+        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
+        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
+            enable memory efficient attention https://arxiv.org/abs/2112.05682
+
+    """
+
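The docstring above describes the LoRA and cross-frame-attention tweaks only at a high level. As a rough illustration (a hypothetical sketch, not the module defined in this repository), a LoRA-augmented projection in Flax typically adds a low-rank update on top of the original dense layer and scales it at call time, which is what the `scale` argument threaded through the blocks below controls:

import flax.linen as nn
import jax.numpy as jnp

class LoRADense(nn.Module):  # illustrative name, not from this repo
    features: int
    rank: int = 4
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, x, scale=1.0):
        base = nn.Dense(self.features, dtype=self.dtype, name="base")(x)                        # original projection
        down = nn.Dense(self.rank, use_bias=False, dtype=self.dtype, name="lora_down")(x)       # low-rank down-projection
        up = nn.Dense(self.features, use_bias=False, dtype=self.dtype, name="lora_up")(down)    # low-rank up-projection
        return base + scale * up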
+    sample_size: int = 32
+    in_channels: int = 4
+    out_channels: int = 4
+    down_block_types: Tuple[str] = (
+        "CrossAttnDownBlock2D",
+        "CrossAttnDownBlock2D",
+        "CrossAttnDownBlock2D",
+        "DownBlock2D",
+    )
+    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
+    only_cross_attention: Union[bool, Tuple[bool]] = False
+    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
+    layers_per_block: int = 2
+    attention_head_dim: Union[int, Tuple[int]] = 8
+    cross_attention_dim: int = 1280
+    dropout: float = 0.0
+    use_linear_projection: bool = False
+    dtype: jnp.dtype = jnp.float16
+    flip_sin_to_cos: bool = True
+    freq_shift: int = 0
+    use_memory_efficient_attention: bool = False
+
+    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
+        # init input tensors
+        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
+        sample = jnp.zeros(sample_shape, dtype=self.dtype)
+        timesteps = jnp.ones((1,), dtype=jnp.int32)
+        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=self.dtype)
+
+        params_rng, dropout_rng = jax.random.split(rng)
+        rngs = {"params": params_rng, "dropout": dropout_rng}
+
+        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
+
+    def setup(self):
+        block_out_channels = self.block_out_channels
+        time_embed_dim = block_out_channels[0] * 4
+
+        # input
+        self.conv_in = nn.Conv(
+            block_out_channels[0],
+            kernel_size=(3, 3),
+            strides=(1, 1),
+            padding=((1, 1), (1, 1)),
+            dtype=self.dtype,
+        )
+
+        # time
+        self.time_proj = FlaxTimesteps(
+            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
+        )
+        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
+
+        only_cross_attention = self.only_cross_attention
+        if isinstance(only_cross_attention, bool):
+            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
+
+        attention_head_dim = self.attention_head_dim
+        if isinstance(attention_head_dim, int):
+            attention_head_dim = (attention_head_dim,) * len(self.down_block_types)
+
+        # #frame positional embedding
+        # self.frame_pe = LoRAPositionalEncoding(self.cross_attention_dim)
+
+        # down
+        down_blocks = []
+        output_channel = block_out_channels[0]
+        for i, down_block_type in enumerate(self.down_block_types):
+            input_channel = output_channel
+            output_channel = block_out_channels[i]
+            is_final_block = i == len(block_out_channels) - 1
+
+            if down_block_type == "CrossAttnDownBlock2D":
+                down_block = FlaxLoRACrossAttnDownBlock2D(
+                    in_channels=input_channel,
+                    out_channels=output_channel,
+                    dropout=self.dropout,
+                    num_layers=self.layers_per_block,
+                    attn_num_head_channels=attention_head_dim[i],
+                    add_downsample=not is_final_block,
+                    use_linear_projection=self.use_linear_projection,
+                    only_cross_attention=only_cross_attention[i],
+                    use_memory_efficient_attention=self.use_memory_efficient_attention,
+                    dtype=self.dtype,
+                )
+            else:
+                down_block = FlaxDownBlock2D(
+                    in_channels=input_channel,
+                    out_channels=output_channel,
+                    dropout=self.dropout,
+                    num_layers=self.layers_per_block,
+                    add_downsample=not is_final_block,
+                    dtype=self.dtype,
+                )
+
+            down_blocks.append(down_block)
+        self.down_blocks = down_blocks
+
+        # mid
+        self.mid_block = FlaxLoRAUNetCrossAttnMidBlock2D(
+            in_channels=block_out_channels[-1],
+            dropout=self.dropout,
+            attn_num_head_channels=attention_head_dim[-1],
+            use_linear_projection=self.use_linear_projection,
+            use_memory_efficient_attention=self.use_memory_efficient_attention,
+            dtype=self.dtype,
+        )
+
+        # up
+        up_blocks = []
+        reversed_block_out_channels = list(reversed(block_out_channels))
+        reversed_attention_head_dim = list(reversed(attention_head_dim))
+        only_cross_attention = list(reversed(only_cross_attention))
+        output_channel = reversed_block_out_channels[0]
+        for i, up_block_type in enumerate(self.up_block_types):
+            prev_output_channel = output_channel
+            output_channel = reversed_block_out_channels[i]
+            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
+
+            is_final_block = i == len(block_out_channels) - 1
+
+            if up_block_type == "CrossAttnUpBlock2D":
+                up_block = FlaxLoRACrossAttnUpBlock2D(
+                    in_channels=input_channel,
+                    out_channels=output_channel,
+                    prev_output_channel=prev_output_channel,
+                    num_layers=self.layers_per_block + 1,
+                    attn_num_head_channels=reversed_attention_head_dim[i],
+                    add_upsample=not is_final_block,
+                    dropout=self.dropout,
+                    use_linear_projection=self.use_linear_projection,
+                    only_cross_attention=only_cross_attention[i],
+                    use_memory_efficient_attention=self.use_memory_efficient_attention,
+                    dtype=self.dtype,
+                )
+            else:
+                up_block = FlaxUpBlock2D(
+                    in_channels=input_channel,
+                    out_channels=output_channel,
+                    prev_output_channel=prev_output_channel,
+                    num_layers=self.layers_per_block + 1,
+                    add_upsample=not is_final_block,
+                    dropout=self.dropout,
+                    dtype=self.dtype,
+                )
+
+            up_blocks.append(up_block)
+            prev_output_channel = output_channel
+        self.up_blocks = up_blocks
+
+        # out
+        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
+        self.conv_out = nn.Conv(
+            self.out_channels,
+            kernel_size=(3, 3),
+            strides=(1, 1),
+            padding=((1, 1), (1, 1)),
+            dtype=self.dtype,
+        )
+
+    def __call__(
+        self,
+        sample,
+        timesteps,
+        encoder_hidden_states,
+        down_block_additional_residuals=None,
+        mid_block_additional_residual=None,
+        return_dict: bool = True,
+        train: bool = False,
+        scale: float = 1.,
+    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
+        r"""
+        Args:
+            sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor
+            timestep (`jnp.ndarray` or `float` or `int`): timesteps
+            encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a
+                plain tuple.
+            train (`bool`, *optional*, defaults to `False`):
+                Use deterministic functions and disable dropout when not training.
+
+        Returns:
+            [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`:
+            [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`.
+            When returning a tuple, the first element is the sample tensor.
+        """
+        # 1. time
+        if not isinstance(timesteps, jnp.ndarray):
+            timesteps = jnp.array([timesteps], dtype=jnp.int32)
+        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
+            timesteps = timesteps.astype(dtype=jnp.float32)
+            timesteps = jnp.expand_dims(timesteps, 0)
+
+        t_emb = self.time_proj(timesteps)
+        t_emb = self.time_embedding(t_emb)
+
+        # 2. pre-process
+        sample = jnp.transpose(sample, (0, 2, 3, 1))
+        sample = self.conv_in(sample)
+
+        # 3. down
+        down_block_res_samples = (sample,)
+        for down_block in self.down_blocks:
+            if isinstance(down_block, FlaxLoRACrossAttnDownBlock2D):
+                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train, scale=scale)
+            else:
+                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
+            down_block_res_samples += res_samples
+
+        if down_block_additional_residuals is not None:
+            new_down_block_res_samples = ()
+
+            for down_block_res_sample, down_block_additional_residual in zip(
+                down_block_res_samples, down_block_additional_residuals
+            ):
+                down_block_res_sample += down_block_additional_residual
+                new_down_block_res_samples += (down_block_res_sample,)
+
+            down_block_res_samples = new_down_block_res_samples
+
+        # if encoder_hidden_states is not None:
+        #     #adding frame positional encoding
+        #     encoder_hidden_states = self.frame_pe(encoder_hidden_states, scale=scale)
+
+        # 4. mid
+        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train, scale=scale)
+
+        if mid_block_additional_residual is not None:
+            sample += mid_block_additional_residual
+
+        # 5. up
+        for up_block in self.up_blocks:
+            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
+            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
+            if isinstance(up_block, FlaxLoRACrossAttnUpBlock2D):
                 sample = up_block(
                     sample,
                     temb=t_emb,
+                    encoder_hidden_states=encoder_hidden_states,
                     res_hidden_states_tuple=res_samples,
                     deterministic=not train,
+                    scale=scale,
                 )
+            else:
+                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

         # 6. post-process
         sample = self.conv_norm_out(sample)

         if not return_dict:
             return (sample,)

+        return FlaxUNet2DConditionOutput(sample=sample)
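Since FlaxLoRAUNet2DConditionModel is a plain Flax module, a single denoising step could be run roughly as follows (a hypothetical usage sketch; the variable names and the 0.8 value are only examples). The `scale` keyword gates how strongly the LoRA branches contribute:

noise_pred = lora_unet.apply(
    {"params": lora_unet_params},
    sample,                  # (batch, channels, height, width) noisy latents
    timesteps,               # (batch,) integer timesteps
    encoder_hidden_states,   # (batch, seq_len, cross_attention_dim) text embeddings
    scale=0.8,               # illustrative LoRA scale
).sample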
text_to_animation/pipelines/text_to_video_pipeline_flax.py
CHANGED
|
@@ -11,11 +11,7 @@ from flax.training.common_utils import shard
 from PIL import Image
 from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel
 from einops import rearrange, repeat
-from diffusers.models import
-    FlaxAutoencoderKL,
-    FlaxControlNetModel,
-    FlaxUNet2DConditionModel,
-)
 from diffusers.schedulers import (
     FlaxDDIMScheduler,
     FlaxDPMSolverMultistepScheduler,

@@ -25,24 +21,21 @@ from diffusers.schedulers import (
 from diffusers.utils import PIL_INTERPOLATION, logging, replace_example_docstring
 from diffusers.pipelines.pipeline_flax_utils import FlaxDiffusionPipeline
 from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionPipelineOutput
-from diffusers.pipelines.stable_diffusion.safety_checker_flax import
-    FlaxStableDiffusionSafetyChecker,
-)
-
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
 """
 Text2Video-Zero:
 - Inputs: Prompt, Pose Control via mp4/gif, First Frame (?)
 - JAX implementation
 - 3DUnet to replace 2DUnetConditional
-"""

 def replicate_devices(array):
     return jnp.expand_dims(array, 0).repeat(jax.device_count(), 0)
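The `replicate_devices` helper above simply adds a leading device axis so per-example inputs can be fed to `jax.pmap`-ed functions. A hedged usage sketch (the pipeline variable name is illustrative):

prompt_ids = pipe.prepare_text_inputs("a dancing astronaut")   # (1, seq_len)
prompt_ids = replicate_devices(prompt_ids)                     # (num_devices, 1, seq_len)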


-DEBUG = False

 EXAMPLE_DOC_STRING = """
 Examples:
@@ -101,8 +94,6 @@ EXAMPLE_DOC_STRING = """
     >>> output_images.save("generated_image.png")
     ```
 """
-
-
 class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
     def __init__(
         self,

@@ -113,10 +104,7 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
         unet_vanilla,
         controlnet,
         scheduler: Union[
-            FlaxDDIMScheduler,
-            FlaxPNDMScheduler,
-            FlaxLMSDiscreteScheduler,
-            FlaxDPMSolverMultistepScheduler,
         ],
         safety_checker: FlaxStableDiffusionSafetyChecker,
         feature_extractor: CLIPFeatureExtractor,

@@ -154,50 +142,30 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
         else:
             eps = jax.random.normal(prng, x0.shape, dtype=text_embeddings.dtype)
         alpha_vec = jnp.prod(params["scheduler"].common.alphas[t0:tMax])
-        xt = jnp.sqrt(alpha_vec) * x0 +
         return xt
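The deleted `xt = ...` line above is cut off in this view; presumably it completes as the standard DDPM forward (noising) step, which is an assumption rather than something recovered from the diff:

# assumed completion of the truncated line; alpha_vec is the cumulative product of alphas over [t0, tMax)
xt = jnp.sqrt(alpha_vec) * x0 + jnp.sqrt(1 - alpha_vec) * eps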
| 159 |
-
|
| 160 |
-
def DDIM_backward(
|
| 161 |
-
|
| 162 |
-
params,
|
| 163 |
-
num_inference_steps,
|
| 164 |
-
timesteps,
|
| 165 |
-
skip_t,
|
| 166 |
-
t0,
|
| 167 |
-
t1,
|
| 168 |
-
do_classifier_free_guidance,
|
| 169 |
-
text_embeddings,
|
| 170 |
-
latents_local,
|
| 171 |
-
guidance_scale,
|
| 172 |
-
controlnet_image=None,
|
| 173 |
-
controlnet_conditioning_scale=None,
|
| 174 |
-
):
|
| 175 |
-
scheduler_state = self.scheduler.set_timesteps(
|
| 176 |
-
params["scheduler"], num_inference_steps
|
| 177 |
-
)
|
| 178 |
f = latents_local.shape[2]
|
| 179 |
latents_local = rearrange(latents_local, "b c f h w -> (b f) c h w")
|
| 180 |
latents = latents_local.copy()
|
| 181 |
x_t0_1 = None
|
| 182 |
x_t1_1 = None
|
| 183 |
-
max_timestep = len(timesteps)
|
| 184 |
timesteps = jnp.array(timesteps)
|
| 185 |
-
|
| 186 |
def while_body(args):
|
| 187 |
step, latents, x_t0_1, x_t1_1, scheduler_state = args
|
| 188 |
t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
|
| 189 |
-
latent_model_input = (
|
| 190 |
-
|
| 191 |
-
if do_classifier_free_guidance
|
| 192 |
-
else latents
|
| 193 |
-
)
|
| 194 |
latent_model_input = self.scheduler.scale_model_input(
|
| 195 |
scheduler_state, latent_model_input, timestep=t
|
| 196 |
)
|
| 197 |
f = latents.shape[0]
|
| 198 |
-
te = jnp.stack(
|
| 199 |
-
[text_embeddings[0, :, :]] * f + [text_embeddings[-1, :, :]] * f
|
| 200 |
-
)
|
| 201 |
timestep = jnp.broadcast_to(t, latent_model_input.shape[0])
|
| 202 |
if controlnet_image is not None:
|
| 203 |
down_block_res_samples, mid_block_res_sample = self.controlnet.apply(
|
|
@@ -224,43 +192,32 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 224 |
jnp.array(latent_model_input),
|
| 225 |
jnp.array(timestep, dtype=jnp.int32),
|
| 226 |
encoder_hidden_states=te,
|
| 227 |
-
|
| 228 |
# perform guidance
|
| 229 |
if do_classifier_free_guidance:
|
| 230 |
noise_pred_uncond, noise_pred_text = jnp.split(noise_pred, 2, axis=0)
|
| 231 |
-
noise_pred = noise_pred_uncond + guidance_scale *
|
| 232 |
-
noise_pred_text - noise_pred_uncond
|
| 233 |
-
)
|
| 234 |
# compute the previous noisy sample x_t -> x_t-1
|
| 235 |
-
latents, scheduler_state = self.scheduler.step(
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
x_t0_1 = jax.lax.select(
|
| 239 |
-
(step < max_timestep - 1) & (timesteps[step + 1] == t0), latents, x_t0_1
|
| 240 |
-
)
|
| 241 |
-
x_t1_1 = jax.lax.select(
|
| 242 |
-
(step < max_timestep - 1) & (timesteps[step + 1] == t1), latents, x_t1_1
|
| 243 |
-
)
|
| 244 |
return (step + 1, latents, x_t0_1, x_t1_1, scheduler_state)
|
| 245 |
-
|
| 246 |
latents_shape = latents.shape
|
| 247 |
x_t0_1, x_t1_1 = jnp.zeros(latents_shape), jnp.zeros(latents_shape)
|
| 248 |
|
| 249 |
def cond_fun(arg):
|
| 250 |
step, latents, x_t0_1, x_t1_1, scheduler_state = arg
|
| 251 |
return (step < skip_t) & (step < num_inference_steps)
|
| 252 |
-
|
| 253 |
if DEBUG:
|
| 254 |
step = 0
|
| 255 |
while cond_fun((step, latents, x_t0_1, x_t1_1)):
|
| 256 |
-
step, latents, x_t0_1, x_t1_1, scheduler_state = while_body(
|
| 257 |
-
(step, latents, x_t0_1, x_t1_1, scheduler_state)
|
| 258 |
-
)
|
| 259 |
step = step + 1
|
| 260 |
else:
|
| 261 |
-
_, latents, x_t0_1, x_t1_1, scheduler_state = jax.lax.while_loop(
|
| 262 |
-
cond_fun, while_body, (0, latents, x_t0_1, x_t1_1, scheduler_state)
|
| 263 |
-
)
|
| 264 |
latents = rearrange(latents, "(b f) c h w -> b c f h w", f=f)
|
| 265 |
res = {"x0": latents.copy()}
|
| 266 |
if x_t0_1 is not None:
|
|
@@ -270,7 +227,7 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 270 |
x_t1_1 = rearrange(x_t1_1, "(b f) c h w -> b c f h w", f=f)
|
| 271 |
res["x_t1_1"] = x_t1_1.copy()
|
| 272 |
return res
|
| 273 |
-
|
| 274 |
def warp_latents_independently(self, latents, reference_flow):
|
| 275 |
_, _, H, W = reference_flow.shape
|
| 276 |
b, _, f, h, w = latents.shape
|
|
@@ -281,10 +238,10 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 281 |
coords_t0 = coords_t0.at[:, 1].set(coords_t0[:, 1] * h / H)
|
| 282 |
f, c, _, _ = coords_t0.shape
|
| 283 |
coords_t0 = jax.image.resize(coords_t0, (f, c, h, w), "linear")
|
| 284 |
-
coords_t0 = rearrange(coords_t0,
|
| 285 |
-
latents_0 = rearrange(latents[0],
|
| 286 |
warped = grid_sample(latents_0, coords_t0, "mirror")
|
| 287 |
-
warped = rearrange(warped,
|
| 288 |
return warped
|
| 289 |
|
| 290 |
def warp_vid_independently(self, vid, reference_flow):
|
|
@@ -296,173 +253,75 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 296 |
coords_t0 = coords_t0.at[:, 1].set(coords_t0[:, 1] * h / H)
|
| 297 |
f, c, _, _ = coords_t0.shape
|
| 298 |
coords_t0 = jax.image.resize(coords_t0, (f, c, h, w), "linear")
|
| 299 |
-
coords_t0 = rearrange(coords_t0,
|
| 300 |
# latents_0 = rearrange(vid, 'c f h w -> f c h w')
|
| 301 |
warped = grid_sample(vid, coords_t0, "zeropad")
|
| 302 |
# warped = rearrange(warped, 'f c h w -> b c f h w', f=f)
|
| 303 |
return warped
|
| 304 |
-
|
| 305 |
-
def create_motion_field(
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
motion_field_strength_y,
|
| 309 |
-
frame_ids,
|
| 310 |
-
video_length,
|
| 311 |
-
latents,
|
| 312 |
-
):
|
| 313 |
-
reference_flow = jnp.zeros((video_length - 1, 2, 512, 512), dtype=latents.dtype)
|
| 314 |
for fr_idx, frame_id in enumerate(frame_ids):
|
| 315 |
-
reference_flow = reference_flow.at[fr_idx, 0, :,
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
motion_field_strength_y * (frame_id)
|
| 320 |
-
)
|
| 321 |
return reference_flow
|
| 322 |
-
|
| 323 |
-
def create_motion_field_and_warp_latents(
|
| 324 |
-
self,
|
| 325 |
-
|
| 326 |
-
motion_field_strength_y,
|
| 327 |
-
frame_ids,
|
| 328 |
-
video_length,
|
| 329 |
-
latents,
|
| 330 |
-
):
|
| 331 |
-
motion_field = self.create_motion_field(
|
| 332 |
-
motion_field_strength_x=motion_field_strength_x,
|
| 333 |
-
motion_field_strength_y=motion_field_strength_y,
|
| 334 |
-
latents=latents,
|
| 335 |
-
video_length=video_length,
|
| 336 |
-
frame_ids=frame_ids,
|
| 337 |
-
)
|
| 338 |
for idx, latent in enumerate(latents):
|
| 339 |
-
latents = latents.at[idx].set(
|
| 340 |
-
|
| 341 |
-
)
|
| 342 |
return motion_field, latents
|
| 343 |
|
| 344 |
-
def text_to_video_zero(
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
controlnet_conditioning_scale=0,
|
| 364 |
-
):
|
| 365 |
frame_ids = list(range(video_length))
|
| 366 |
# Prepare timesteps
|
| 367 |
-
params["scheduler"] = self.scheduler.set_timesteps(
|
| 368 |
-
params["scheduler"], num_inference_steps
|
| 369 |
-
)
|
| 370 |
timesteps = params["scheduler"].timesteps
|
| 371 |
# Prepare latent variables
|
| 372 |
num_channels_latents = self.unet.in_channels
|
| 373 |
batch_size = 1
|
| 374 |
-
xT = prepare_latents(
|
| 375 |
-
params,
|
| 376 |
-
prng,
|
| 377 |
-
batch_size * num_videos_per_prompt,
|
| 378 |
-
num_channels_latents,
|
| 379 |
-
height,
|
| 380 |
-
width,
|
| 381 |
-
self.vae_scale_factor,
|
| 382 |
-
xT,
|
| 383 |
-
)
|
| 384 |
|
| 385 |
-
timesteps_ddpm = [
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
921,
|
| 390 |
-
901,
|
| 391 |
-
881,
|
| 392 |
-
861,
|
| 393 |
-
841,
|
| 394 |
-
821,
|
| 395 |
-
801,
|
| 396 |
-
781,
|
| 397 |
-
761,
|
| 398 |
-
741,
|
| 399 |
-
721,
|
| 400 |
-
701,
|
| 401 |
-
681,
|
| 402 |
-
661,
|
| 403 |
-
641,
|
| 404 |
-
621,
|
| 405 |
-
601,
|
| 406 |
-
581,
|
| 407 |
-
561,
|
| 408 |
-
541,
|
| 409 |
-
521,
|
| 410 |
-
501,
|
| 411 |
-
481,
|
| 412 |
-
461,
|
| 413 |
-
441,
|
| 414 |
-
421,
|
| 415 |
-
401,
|
| 416 |
-
381,
|
| 417 |
-
361,
|
| 418 |
-
341,
|
| 419 |
-
321,
|
| 420 |
-
301,
|
| 421 |
-
281,
|
| 422 |
-
261,
|
| 423 |
-
241,
|
| 424 |
-
221,
|
| 425 |
-
201,
|
| 426 |
-
181,
|
| 427 |
-
161,
|
| 428 |
-
141,
|
| 429 |
-
121,
|
| 430 |
-
101,
|
| 431 |
-
81,
|
| 432 |
-
61,
|
| 433 |
-
41,
|
| 434 |
-
21,
|
| 435 |
-
1,
|
| 436 |
-
]
|
| 437 |
timesteps_ddpm.reverse()
|
| 438 |
t0 = timesteps_ddpm[t0]
|
| 439 |
t1 = timesteps_ddpm[t1]
|
| 440 |
x_t1_1 = None
|
| 441 |
|
| 442 |
# Denoising loop
|
| 443 |
-
shape = (
|
| 444 |
-
|
| 445 |
-
num_channels_latents,
|
| 446 |
-
1,
|
| 447 |
-
height // self.vae.scaling_factor,
|
| 448 |
-
width // self.vae.scaling_factor,
|
| 449 |
-
)
|
| 450 |
|
| 451 |
# perform ∆t backward steps by stable diffusion
|
| 452 |
-
ddim_res = self.DDIM_backward(
|
| 453 |
-
|
| 454 |
-
|
| 455 |
-
timesteps=timesteps,
|
| 456 |
-
skip_t=1000,
|
| 457 |
-
t0=t0,
|
| 458 |
-
t1=t1,
|
| 459 |
-
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 460 |
-
text_embeddings=text_embeddings,
|
| 461 |
-
latents_local=xT,
|
| 462 |
-
guidance_scale=guidance_scale,
|
| 463 |
-
controlnet_image=jnp.stack([controlnet_image[0]] * 2),
|
| 464 |
-
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 465 |
-
)
|
| 466 |
x0 = ddim_res["x0"]
|
| 467 |
|
| 468 |
# apply warping functions
|
|
@@ -470,89 +329,46 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 470 |
x_t0_1 = ddim_res["x_t0_1"]
|
| 471 |
if "x_t1_1" in ddim_res:
|
| 472 |
x_t1_1 = ddim_res["x_t1_1"]
|
| 473 |
-
x_t0_k = x_t0_1[:, :, :1, :, :].repeat(video_length
|
| 474 |
reference_flow, x_t0_k = self.create_motion_field_and_warp_latents(
|
| 475 |
-
motion_field_strength_x=motion_field_strength_x,
|
| 476 |
-
motion_field_strength_y=motion_field_strength_y,
|
| 477 |
-
latents=x_t0_k,
|
| 478 |
-
video_length=video_length,
|
| 479 |
-
frame_ids=frame_ids[1:],
|
| 480 |
-
)
|
| 481 |
# assuming t0=t1=1000, if t0 = 1000
|
| 482 |
|
| 483 |
# DDPM forward for more motion freedom
|
| 484 |
-
ddpm_fwd = partial(
|
| 485 |
-
|
| 486 |
-
|
| 487 |
-
|
| 488 |
-
|
| 489 |
-
t0=t0,
|
| 490 |
-
tMax=t1,
|
| 491 |
-
shape=shape,
|
| 492 |
-
text_embeddings=text_embeddings,
|
| 493 |
)
|
| 494 |
-
x_t1_k = jax.lax.cond(t1 > t0, ddpm_fwd, lambda: x_t0_k)
|
| 495 |
x_t1 = jnp.concatenate([x_t1_1, x_t1_k], axis=2)
|
| 496 |
|
| 497 |
# backward stepts by stable diffusion
|
| 498 |
|
| 499 |
-
#
|
| 500 |
controlnet_video = controlnet_image[:video_length]
|
| 501 |
-
controlnet_video = controlnet_video.at[1:].set(
|
| 502 |
-
|
| 503 |
-
)
|
| 504 |
-
controlnet_image = jnp.concatenate([controlnet_video] * 2)
|
| 505 |
smooth_bg = True
|
| 506 |
|
| 507 |
if smooth_bg:
|
| 508 |
-
#
|
| 509 |
-
M_FG = repeat(
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
)
|
| 515 |
-
initial_bg = repeat(
|
| 516 |
-
x_t1[:, :, 0] * (1 - M_FG[:, :, 0]),
|
| 517 |
-
"b c h w -> b c f h w",
|
| 518 |
-
f=video_length - 1,
|
| 519 |
-
)
|
| 520 |
-
# warp the controlnet image following the same flow defined for latent #f c h w
|
| 521 |
-
initial_bg_warped = self.warp_latents_independently(
|
| 522 |
-
initial_bg, reference_flow
|
| 523 |
-
)
|
| 524 |
-
bgs = x_t1[:, :, 1:] * (1 - M_FG[:, :, 1:]) # initial background
|
| 525 |
-
initial_mask_warped = 1 - self.warp_latents_independently(
|
| 526 |
-
repeat(M_FG[:, :, 0], "b c h w -> b c f h w", f=video_length - 1),
|
| 527 |
-
reference_flow,
|
| 528 |
-
)
|
| 529 |
# initial_mask_warped = 1 - warp_vid_independently(repeat(M_FG[:,:,0], "b c h w -> (b f) c h w", f = video_length-1), reference_flow)
|
| 530 |
# initial_mask_warped = rearrange(initial_mask_warped, "(b f) c h w -> b c f h w", b=batch_size)
|
| 531 |
-
mask = (1 - M_FG[
|
| 532 |
-
x_t1 = x_t1.at[
|
| 533 |
-
|
| 534 |
-
|
| 535 |
-
|
| 536 |
-
|
| 537 |
-
|
| 538 |
-
|
| 539 |
-
)
|
| 540 |
-
|
| 541 |
-
ddim_res = self.DDIM_backward(
|
| 542 |
-
params,
|
| 543 |
-
num_inference_steps=num_inference_steps,
|
| 544 |
-
timesteps=timesteps,
|
| 545 |
-
skip_t=t1,
|
| 546 |
-
t0=-1,
|
| 547 |
-
t1=-1,
|
| 548 |
-
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 549 |
-
text_embeddings=text_embeddings,
|
| 550 |
-
latents_local=x_t1,
|
| 551 |
-
guidance_scale=guidance_scale,
|
| 552 |
-
controlnet_image=controlnet_image,
|
| 553 |
-
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 554 |
-
)
|
| 555 |
-
|
| 556 |
x0 = ddim_res["x0"]
|
| 557 |
del ddim_res
|
| 558 |
del x_t1
|
|
@@ -560,42 +376,25 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 560 |
del x_t1_k
|
| 561 |
return x0
|
| 562 |
|
| 563 |
-
def denoise_latent(
|
| 564 |
-
|
| 565 |
-
|
| 566 |
-
num_inference_steps
|
| 567 |
-
timesteps,
|
| 568 |
-
do_classifier_free_guidance,
|
| 569 |
-
text_embeddings,
|
| 570 |
-
latents,
|
| 571 |
-
guidance_scale,
|
| 572 |
-
controlnet_image=None,
|
| 573 |
-
controlnet_conditioning_scale=None,
|
| 574 |
-
):
|
| 575 |
-
scheduler_state = self.scheduler.set_timesteps(
|
| 576 |
-
params["scheduler"], num_inference_steps
|
| 577 |
-
)
|
| 578 |
# f = latents_local.shape[2]
|
| 579 |
# latents_local = rearrange(latents_local, "b c f h w -> (b f) c h w")
|
| 580 |
|
| 581 |
-
max_timestep = len(timesteps)
|
| 582 |
timesteps = jnp.array(timesteps)
|
| 583 |
-
|
| 584 |
def while_body(args):
|
| 585 |
step, latents, scheduler_state = args
|
| 586 |
t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
|
| 587 |
-
latent_model_input = (
|
| 588 |
-
|
| 589 |
-
if do_classifier_free_guidance
|
| 590 |
-
else latents
|
| 591 |
-
)
|
| 592 |
latent_model_input = self.scheduler.scale_model_input(
|
| 593 |
scheduler_state, latent_model_input, timestep=t
|
| 594 |
)
|
| 595 |
f = latents.shape[0]
|
| 596 |
-
te = jnp.stack(
|
| 597 |
-
[text_embeddings[0, :, :]] * f + [text_embeddings[-1, :, :]] * f
|
| 598 |
-
)
|
| 599 |
timestep = jnp.broadcast_to(t, latent_model_input.shape[0])
|
| 600 |
if controlnet_image is not None:
|
| 601 |
down_block_res_samples, mid_block_res_sample = self.controlnet.apply(
|
|
@@ -622,215 +421,104 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 622 |
jnp.array(latent_model_input),
|
| 623 |
jnp.array(timestep, dtype=jnp.int32),
|
| 624 |
encoder_hidden_states=te,
|
| 625 |
-
|
| 626 |
# perform guidance
|
| 627 |
if do_classifier_free_guidance:
|
| 628 |
noise_pred_uncond, noise_pred_text = jnp.split(noise_pred, 2, axis=0)
|
| 629 |
-
noise_pred = noise_pred_uncond + guidance_scale *
|
| 630 |
-
noise_pred_text - noise_pred_uncond
|
| 631 |
-
)
|
| 632 |
# compute the previous noisy sample x_t -> x_t-1
|
| 633 |
-
latents, scheduler_state = self.scheduler.step(
|
| 634 |
-
scheduler_state, noise_pred, t, latents
|
| 635 |
-
).to_tuple()
|
| 636 |
return (step + 1, latents, scheduler_state)
|
| 637 |
-
|
| 638 |
def cond_fun(arg):
|
| 639 |
step, latents, scheduler_state = arg
|
| 640 |
-
return step < num_inference_steps
|
| 641 |
-
|
| 642 |
if DEBUG:
|
| 643 |
step = 0
|
| 644 |
while cond_fun((step, latents, scheduler_state)):
|
| 645 |
-
step, latents, scheduler_state = while_body(
|
| 646 |
-
(step, latents, scheduler_state)
|
| 647 |
-
)
|
| 648 |
step = step + 1
|
| 649 |
else:
|
| 650 |
-
_, latents, scheduler_state = jax.lax.while_loop(
|
| 651 |
-
cond_fun, while_body, (0, latents, scheduler_state)
|
| 652 |
-
)
|
| 653 |
# latents = rearrange(latents, "(b f) c h w -> b c f h w", f=f)
|
| 654 |
return latents
|
| 655 |
|
| 656 |
-
|
| 657 |
-
|
| 658 |
-
|
| 659 |
-
|
| 660 |
-
|
| 661 |
-
|
| 662 |
-
|
| 663 |
-
|
| 664 |
-
|
| 665 |
-
|
| 666 |
-
|
| 667 |
-
|
| 668 |
-
|
| 669 |
-
# delta_t_diffusion = jax.vmap(lambda latent : self.DDIM_backward(params, num_inference_steps=num_inference_steps, timesteps=timesteps, skip_t=1000, t0=t0, t1=t1, do_classifier_free_guidance=do_classifier_free_guidance,
|
| 670 |
-
# text_embeddings=text_embeddings, latents_local=latent, guidance_scale=guidance_scale,
|
| 671 |
-
# controlnet_image=controlnet_image, controlnet_conditioning_scale=controlnet_conditioning_scale))
|
| 672 |
-
# ddim_res = delta_t_diffusion(latents)
|
| 673 |
-
# latents = ddim_res["x0"] #output is i b c f h w
|
| 674 |
-
|
| 675 |
-
# DDPM forward for more motion freedom
|
| 676 |
-
# ddpm_fwd = jax.vmap(lambda prng, latent: self.DDPM_forward(params=params, prng=prng, x0=latent, t0=t0,
|
| 677 |
-
# tMax=t1, shape=shape, text_embeddings=text_embeddings))
|
| 678 |
-
# latents = ddpm_fwd(stacked_prngs, latents)
|
| 679 |
-
# main backward diffusion
|
| 680 |
-
# denoise_first_frame = lambda latent : self.DDIM_backward(params, num_inference_steps=num_inference_steps, timesteps=timesteps, skip_t=100000, t0=-1, t1=-1, do_classifier_free_guidance=do_classifier_free_guidance,
|
| 681 |
-
# text_embeddings=text_embeddings, latents_local=latent, guidance_scale=guidance_scale,
|
| 682 |
-
# controlnet_image=controlnet_image, controlnet_conditioning_scale=controlnet_conditioning_scale, use_vanilla=True)
|
| 683 |
-
# latents = rearrange(latents, 'i b c f h w -> (i b) c f h w')
|
| 684 |
-
# ddim_res = denoise_first_frame(latents)
|
| 685 |
-
latents = self.denoise_latent(
|
| 686 |
-
params,
|
| 687 |
-
num_inference_steps=num_inference_steps,
|
| 688 |
-
timesteps=timesteps,
|
| 689 |
-
do_classifier_free_guidance=True,
|
| 690 |
-
text_embeddings=text_embeddings,
|
| 691 |
-
latents=latents,
|
| 692 |
-
guidance_scale=guidance_scale,
|
| 693 |
-
controlnet_image=controlnet_image,
|
| 694 |
-
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 695 |
-
)
|
| 696 |
-
# latents = rearrange(ddim_res["x0"], 'i b c f h w -> (i b) c f h w') #output is i b c f h w
|
| 697 |
-
|
| 698 |
-
# scale and decode the image latents with vae
|
| 699 |
-
latents = 1 / self.vae.config.scaling_factor * latents
|
| 700 |
-
# latents = rearrange(latents, "b c h w -> (b f) c h w")
|
| 701 |
-
imgs = self.vae.apply(
|
| 702 |
-
{"params": params["vae"]}, latents, method=self.vae.decode
|
| 703 |
-
).sample
|
| 704 |
-
imgs = (imgs / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
|
| 705 |
-
return imgs
|
| 706 |
|
| 707 |
-
def generate_starting_frames(
|
| 708 |
-
self,
|
| 709 |
-
params,
|
| 710 |
-
prngs: list, # list of prngs for each img
|
| 711 |
-
prompt,
|
| 712 |
-
neg_prompt,
|
| 713 |
-
controlnet_image,
|
| 714 |
-
do_classifier_free_guidance=True,
|
| 715 |
-
num_inference_steps: int = 50,
|
| 716 |
-
guidance_scale: float = 7.5,
|
| 717 |
-
t0: int = 44,
|
| 718 |
-
t1: int = 47,
|
| 719 |
-
controlnet_conditioning_scale=1.0,
|
| 720 |
-
):
|
| 721 |
height, width = controlnet_image.shape[-2:]
|
| 722 |
if height % 64 != 0 or width % 64 != 0:
|
| 723 |
-
raise ValueError(
|
| 724 |
-
f"`height` and `width` have to be divisible by 64 but are {height} and {width}."
|
| 725 |
-
)
|
| 726 |
|
| 727 |
-
shape = (
|
| 728 |
-
|
| 729 |
-
height // self.vae_scale_factor,
|
| 730 |
-
width // self.vae_scale_factor,
|
| 731 |
-
) # b c h w
|
| 732 |
# scale the initial noise by the standard deviation required by the scheduler
|
| 733 |
|
| 734 |
-
print(
|
| 735 |
-
f"Generating {len(prngs)} first frames with prompt {prompt}, for {num_inference_steps} steps. PRNG seeds are: {prngs}"
|
| 736 |
-
)
|
| 737 |
|
| 738 |
-
latents = jnp.stack(
|
| 739 |
-
[jax.random.normal(prng, shape) for prng in prngs]
|
| 740 |
-
) # b c h w
|
| 741 |
latents = latents * params["scheduler"].init_noise_sigma
|
| 742 |
|
| 743 |
timesteps = params["scheduler"].timesteps
|
| 744 |
-
timesteps_ddpm = [
|
| 745 |
-
|
| 746 |
-
|
| 747 |
-
|
| 748 |
-
921,
|
| 749 |
-
901,
|
| 750 |
-
881,
|
| 751 |
-
861,
|
| 752 |
-
841,
|
| 753 |
-
821,
|
| 754 |
-
801,
|
| 755 |
-
781,
|
| 756 |
-
761,
|
| 757 |
-
741,
|
| 758 |
-
721,
|
| 759 |
-
701,
|
| 760 |
-
681,
|
| 761 |
-
661,
|
| 762 |
-
641,
|
| 763 |
-
621,
|
| 764 |
-
601,
|
| 765 |
-
581,
|
| 766 |
-
561,
|
| 767 |
-
541,
|
| 768 |
-
521,
|
| 769 |
-
501,
|
| 770 |
-
481,
|
| 771 |
-
461,
|
| 772 |
-
441,
|
| 773 |
-
421,
|
| 774 |
-
401,
|
| 775 |
-
381,
|
| 776 |
-
361,
|
| 777 |
-
341,
|
| 778 |
-
321,
|
| 779 |
-
301,
|
| 780 |
-
281,
|
| 781 |
-
261,
|
| 782 |
-
241,
|
| 783 |
-
221,
|
| 784 |
-
201,
|
| 785 |
-
181,
|
| 786 |
-
161,
|
| 787 |
-
141,
|
| 788 |
-
121,
|
| 789 |
-
101,
|
| 790 |
-
81,
|
| 791 |
-
61,
|
| 792 |
-
41,
|
| 793 |
-
21,
|
| 794 |
-
1,
|
| 795 |
-
]
|
| 796 |
timesteps_ddpm.reverse()
|
| 797 |
t0 = timesteps_ddpm[t0]
|
| 798 |
t1 = timesteps_ddpm[t1]
|
| 799 |
|
| 800 |
# get prompt text embeddings
|
| 801 |
-
prompt_ids = self.prepare_text_inputs(prompt)
|
| 802 |
-
|
|
|
|
|
|
| 803 |
|
| 804 |
# TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
|
| 805 |
# implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
|
| 806 |
batch_size = 1
|
| 807 |
max_length = prompt_ids.shape[-1]
|
| 808 |
if neg_prompt is None:
|
| 809 |
-
uncond_input = self.tokenizer(
|
| 810 |
-
[""] * batch_size,
|
| 811 |
-
|
| 812 |
-
max_length=max_length,
|
| 813 |
-
return_tensors="np",
|
| 814 |
-
).input_ids
|
| 815 |
else:
|
| 816 |
neg_prompt_ids = self.prepare_text_inputs(neg_prompt)
|
| 817 |
-
uncond_input = neg_prompt_ids
|
| 818 |
|
| 819 |
-
|
| 820 |
-
|
| 821 |
-
|
| 822 |
-
|
| 823 |
-
|
| 824 |
-
|
| 825 |
-
|
| 826 |
-
|
| 827 |
-
|
| 828 |
-
|
| 829 |
-
|
| 830 |
-
|
| 831 |
-
|
| 832 |
-
|
| 833 |
-
)
|
| 834 |
|
| 835 |
def generate_video(
|
| 836 |
self,
|
|
@@ -845,8 +533,8 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 845 |
controlnet_conditioning_scale: Union[float, jnp.array] = 1.0,
|
| 846 |
return_dict: bool = True,
|
| 847 |
jit: bool = False,
|
| 848 |
-
xT=None,
|
| 849 |
-
smooth_bg_strength: float
|
| 850 |
motion_field_strength_x: float = 3,
|
| 851 |
motion_field_strength_y: float = 4,
|
| 852 |
t0: int = 44,
|
|
@@ -912,9 +600,7 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 912 |
if isinstance(controlnet_conditioning_scale, float):
|
| 913 |
# Convert to a tensor so each device gets a copy. Follow the prompt_ids for
|
| 914 |
# shape information, as they may be sharded (when `jit` is `True`), or not.
|
| 915 |
-
controlnet_conditioning_scale = jnp.array(
|
| 916 |
-
[controlnet_conditioning_scale] * prompt_ids.shape[0]
|
| 917 |
-
)
|
| 918 |
if len(prompt_ids.shape) > 2:
|
| 919 |
# Assume sharded
|
| 920 |
controlnet_conditioning_scale = controlnet_conditioning_scale[:, None]
|
|
@@ -928,9 +614,7 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 928 |
num_inference_steps,
|
| 929 |
replicate_devices(guidance_scale),
|
| 930 |
replicate_devices(latents) if latents is not None else None,
|
| 931 |
-
replicate_devices(neg_prompt_ids)
|
| 932 |
-
if neg_prompt_ids is not None
|
| 933 |
-
else None,
|
| 934 |
replicate_devices(controlnet_conditioning_scale),
|
| 935 |
replicate_devices(xT) if xT is not None else None,
|
| 936 |
replicate_devices(smooth_bg_strength),
|
|
@@ -961,12 +645,8 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 961 |
safety_params = params["safety_checker"]
|
| 962 |
images_uint8_casted = (images * 255).round().astype("uint8")
|
| 963 |
num_devices, batch_size = images.shape[:2]
|
| 964 |
-
images_uint8_casted = np.asarray(images_uint8_casted).reshape(
|
| 965 |
-
|
| 966 |
-
)
|
| 967 |
-
images_uint8_casted, has_nsfw_concept = self._run_safety_checker(
|
| 968 |
-
images_uint8_casted, safety_params, jit
|
| 969 |
-
)
|
| 970 |
images = np.asarray(images)
|
| 971 |
# block images
|
| 972 |
if any(has_nsfw_concept):
|
|
@@ -979,15 +659,11 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 979 |
has_nsfw_concept = False
|
| 980 |
if not return_dict:
|
| 981 |
return (images, has_nsfw_concept)
|
| 982 |
-
return FlaxStableDiffusionPipelineOutput(
|
| 983 |
-
images=images, nsfw_content_detected=has_nsfw_concept
|
| 984 |
-
)
|
| 985 |
|
| 986 |
def prepare_text_inputs(self, prompt: Union[str, List[str]]):
|
| 987 |
if not isinstance(prompt, (str, list)):
|
| 988 |
-
raise ValueError(
|
| 989 |
-
f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
|
| 990 |
-
)
|
| 991 |
text_input = self.tokenizer(
|
| 992 |
prompt,
|
| 993 |
padding="max_length",
|
|
@@ -996,38 +672,27 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 996 |
return_tensors="np",
|
| 997 |
)
|
| 998 |
return text_input.input_ids
|
| 999 |
-
|
| 1000 |
def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]):
|
| 1001 |
if not isinstance(image, (Image.Image, list)):
|
| 1002 |
-
raise ValueError(
|
| 1003 |
-
f"image has to be of type `PIL.Image.Image` or list but is {type(image)}"
|
| 1004 |
-
)
|
| 1005 |
if isinstance(image, Image.Image):
|
| 1006 |
image = [image]
|
| 1007 |
-
processed_images = jnp.concatenate(
|
| 1008 |
-
[preprocess(img, jnp.float32) for img in image]
|
| 1009 |
-
)
|
| 1010 |
return processed_images
|
| 1011 |
-
|
| 1012 |
def _get_has_nsfw_concepts(self, features, params):
|
| 1013 |
has_nsfw_concepts = self.safety_checker(features, params)
|
| 1014 |
return has_nsfw_concepts
|
| 1015 |
-
|
| 1016 |
def _run_safety_checker(self, images, safety_model_params, jit=False):
|
| 1017 |
# safety_model_params should already be replicated when jit is True
|
| 1018 |
pil_images = [Image.fromarray(image) for image in images]
|
| 1019 |
features = self.feature_extractor(pil_images, return_tensors="np").pixel_values
|
| 1020 |
if jit:
|
| 1021 |
features = shard(features)
|
| 1022 |
-
has_nsfw_concepts = _p_get_has_nsfw_concepts(
|
| 1023 |
-
self, features, safety_model_params
|
| 1024 |
-
)
|
| 1025 |
has_nsfw_concepts = unshard(has_nsfw_concepts)
|
| 1026 |
safety_model_params = unreplicate(safety_model_params)
|
| 1027 |
else:
|
| 1028 |
-
has_nsfw_concepts = self._get_has_nsfw_concepts(
|
| 1029 |
-
features, safety_model_params
|
| 1030 |
-
)
|
| 1031 |
images_was_copied = False
|
| 1032 |
for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
|
| 1033 |
if has_nsfw_concept:
|
|
@@ -1041,7 +706,6 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 1041 |
" instead. Try again with a different prompt and/or seed."
|
| 1042 |
)
|
| 1043 |
return images, has_nsfw_concepts
|
| 1044 |
-
|
| 1045 |
def _generate(
|
| 1046 |
self,
|
| 1047 |
prompt_ids: jnp.array,
|
|
@@ -1053,8 +717,8 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 1053 |
latents: Optional[jnp.array] = None,
|
| 1054 |
neg_prompt_ids: Optional[jnp.array] = None,
|
| 1055 |
controlnet_conditioning_scale: float = 1.0,
|
| 1056 |
-
xT=None,
|
| 1057 |
-
smooth_bg_strength: float = 0
|
| 1058 |
motion_field_strength_x: float = 12,
|
| 1059 |
motion_field_strength_y: float = 12,
|
| 1060 |
t0: int = 44,
|
|
@@ -1063,9 +727,7 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 1063 |
height, width = image.shape[-2:]
|
| 1064 |
video_length = image.shape[0]
|
| 1065 |
if height % 64 != 0 or width % 64 != 0:
|
| 1066 |
-
raise ValueError(
|
| 1067 |
-
f"`height` and `width` have to be divisible by 64 but are {height} and {width}."
|
| 1068 |
-
)
|
| 1069 |
# get prompt text embeddings
|
| 1070 |
prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]
|
| 1071 |
# TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
|
|
@@ -1074,47 +736,30 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 1074 |
max_length = prompt_ids.shape[-1]
|
| 1075 |
if neg_prompt_ids is None:
|
| 1076 |
uncond_input = self.tokenizer(
|
| 1077 |
-
[""] * batch_size,
|
| 1078 |
-
padding="max_length",
|
| 1079 |
-
max_length=max_length,
|
| 1080 |
-
return_tensors="np",
|
| 1081 |
).input_ids
|
| 1082 |
else:
|
| 1083 |
uncond_input = neg_prompt_ids
|
| 1084 |
-
negative_prompt_embeds = self.text_encoder(
|
| 1085 |
-
uncond_input, params=params["text_encoder"]
|
| 1086 |
-
)[0]
|
| 1087 |
context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])
|
| 1088 |
image = jnp.concatenate([image] * 2)
|
| 1089 |
seed_t2vz, prng_seed = jax.random.split(prng_seed)
|
| 1090 |
-
#
|
| 1091 |
-
latents = self.text_to_video_zero(
|
| 1092 |
-
|
| 1093 |
-
|
| 1094 |
-
|
| 1095 |
-
|
| 1096 |
-
|
| 1097 |
-
|
| 1098 |
-
|
| 1099 |
-
guidance_scale=guidance_scale,
|
| 1100 |
-
controlnet_image=image,
|
| 1101 |
-
xT=xT,
|
| 1102 |
-
smooth_bg_strength=smooth_bg_strength,
|
| 1103 |
-
t0=t0,
|
| 1104 |
-
t1=t1,
|
| 1105 |
-
motion_field_strength_x=motion_field_strength_x,
|
| 1106 |
-
motion_field_strength_y=motion_field_strength_y,
|
| 1107 |
-
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 1108 |
-
)
|
| 1109 |
# scale and decode the image latents with vae
|
| 1110 |
latents = 1 / self.vae.config.scaling_factor * latents
|
| 1111 |
latents = rearrange(latents, "b c f h w -> (b f) c h w")
|
| 1112 |
-
video = self.vae.apply(
|
| 1113 |
-
{"params": params["vae"]}, latents, method=self.vae.decode
|
| 1114 |
-
).sample
|
| 1115 |
video = (video / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
|
| 1116 |
return video
|
| 1117 |
-
|
| 1118 |
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 1119 |
def __call__(
|
| 1120 |
self,
|
|
@@ -1129,8 +774,8 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 1129 |
controlnet_conditioning_scale: Union[float, jnp.array] = 1.0,
|
| 1130 |
return_dict: bool = True,
|
| 1131 |
jit: bool = False,
|
| 1132 |
-
xT=None,
|
| 1133 |
-
smooth_bg_strength: float = 0
|
| 1134 |
motion_field_strength_x: float = 3,
|
| 1135 |
motion_field_strength_y: float = 4,
|
| 1136 |
t0: int = 44,
|
|
@@ -1187,9 +832,7 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 1187 |
if isinstance(controlnet_conditioning_scale, float):
|
| 1188 |
# Convert to a tensor so each device gets a copy. Follow the prompt_ids for
|
| 1189 |
# shape information, as they may be sharded (when `jit` is `True`), or not.
|
| 1190 |
-
controlnet_conditioning_scale = jnp.array(
|
| 1191 |
-
[controlnet_conditioning_scale] * prompt_ids.shape[0]
|
| 1192 |
-
)
|
| 1193 |
if len(prompt_ids.shape) > 2:
|
| 1194 |
# Assume sharded
|
| 1195 |
controlnet_conditioning_scale = controlnet_conditioning_scale[:, None]
|
|
@@ -1234,12 +877,8 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
|
|
| 1234 |
safety_params = params["safety_checker"]
|
| 1235 |
images_uint8_casted = (images * 255).round().astype("uint8")
|
| 1236 |
num_devices, batch_size = images.shape[:2]
|
| 1237 |
-
images_uint8_casted = np.asarray(images_uint8_casted).reshape(
|
| 1238 |
-
|
| 1239 |
-
)
|
| 1240 |
-
images_uint8_casted, has_nsfw_concept = self._run_safety_checker(
|
| 1241 |
-
images_uint8_casted, safety_params, jit
|
| 1242 |
-
)
|
| 1243 |
images = np.asarray(images)
|
| 1244 |
# block images
|
| 1245 |
if any(has_nsfw_concept):
|
|
@@ -1252,9 +891,7 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
| 1252 |             has_nsfw_concept = False
| 1253 |         if not return_dict:
| 1254 |             return (images, has_nsfw_concept)
| 1255 | -       return FlaxStableDiffusionPipelineOutput(
| 1256 | -           images=images, nsfw_content_detected=has_nsfw_concept
| 1257 | -       )
| 1258 |
| 1259 |
| 1260 | # Static argnums are pipe, num_inference_steps. A change would trigger recompilation.
@@ -1262,11 +899,11 @@ class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
| 1262 | @partial(
| 1263 |     jax.pmap,
| 1264 |     in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0, 0, 0, 0, 0, None, None),
| 1265 | -   static_broadcasted_argnums=(0, 5, 14, 15)
| 1266 | )
| 1267 | def _p_generate(
| 1268 |     pipe,
| 1269 | -   prompt_ids,
| 1270 |     image,
| 1271 |     params,
| 1272 |     prng_seed,
@@ -1299,20 +936,52 @@ def _p_generate(
| 1299 |     t0,
| 1300 |     t1,
| 1301 | )
| 1302 | -
| 1303 | -
| 1304 | @partial(jax.pmap, static_broadcasted_argnums=(0,))
| 1305 | def _p_get_has_nsfw_concepts(pipe, features, params):
| 1306 |     return pipe._get_has_nsfw_concepts(features, params)
| 1307 |
| 1308 |
| 1309 | def unshard(x: jnp.ndarray):
| 1310 |     # einops.rearrange(x, 'd b ... -> (d b) ...')
| 1311 |     num_devices, batch_size = x.shape[:2]
| 1312 |     rest = x.shape[2:]
| 1313 |     return x.reshape(num_devices * batch_size, *rest)
| 1314 | -
| 1315 | -
| 1316 | def preprocess(image, dtype):
| 1317 |     image = image.convert("RGB")
| 1318 |     w, h = image.size
@@ -1322,98 +991,61 @@ def preprocess(image, dtype):
| 1322 |     image = image[None].transpose(0, 3, 1, 2)
| 1323 |     return image
| 1324 |
| 1325-1327 | - … (removed lines truncated in the diff view)
| 1328 | -   prng,
| 1329 | -   batch_size,
| 1330 | -   num_channels_latents,
| 1331 | -   height,
| 1332 | -   width,
| 1333 | -   vae_scale_factor,
| 1334 | -   latents=None,
| 1335 | - ):
| 1336 | -   shape = (
| 1337 | -       batch_size,
| 1338 | -       num_channels_latents,
| 1339 | -       1,
| 1340 | -       height // vae_scale_factor,
| 1341 | -       width // vae_scale_factor,
| 1342 | -   )  # b c f h w
| 1343 |     # scale the initial noise by the standard deviation required by the scheduler
| 1344 |     if latents is None:
| 1345 |         latents = jax.random.normal(prng, shape)
| 1346 |     latents = latents * params["scheduler"].init_noise_sigma
| 1347 |     return latents
| 1348 |
| 1349 | -
| 1350 | def coords_grid(batch, ht, wd):
| 1351 |     coords = jnp.meshgrid(jnp.arange(ht), jnp.arange(wd), indexing="ij")
| 1352 |     coords = jnp.stack(coords[::-1], axis=0)
| 1353 |     return coords[None].repeat(batch, 0)
| 1354 |
| 1355 | -
| 1356 | def adapt_pos_mirror(x, y, W, H):
| 1357-1363 | - … (removed lines truncated in the diff view)
| 1364 |
| 1365 | - def safe_get_zeropad(img, x, … (truncated)
| 1366-1368 | - … (removed lines truncated in the diff view)
| 1369 | - def safe_get_mirror(img, x, y, W, H):
| 1370 | -     return img[adapt_pos_mirror(x, y, W, H)]
| 1371 |
| 1372 |
| 1373 | @partial(jax.vmap, in_axes=(0, 0, None))
| 1374 | @partial(jax.vmap, in_axes=(0, None, None))
| 1375 | - @partial(jax.vmap, in_axes=(None, … (truncated)
| 1376 | @partial(jax.vmap, in_axes=(None, 0, None))
| 1377 | def grid_sample(latents, grid, method):
| 1378 |     # this is an alternative to torch.functional.nn.grid_sample in jax
| 1379 |     # this implementation is following the algorithm described @ https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
| 1380 |     # but with coordinates scaled to the size of the image
| 1381 |     if method == "mirror":
| 1382-1384 | - … (removed lines truncated in the diff view)
| 1385 | -           jnp.array(grid[1], dtype=jnp.int16),
| 1386 | -           latents.shape[0],
| 1387 | -           latents.shape[1],
| 1388 | -       )
| 1389 | -   else:  # default is zero padding
| 1390 | -       return safe_get_zeropad(
| 1391 | -           latents,
| 1392 | -           jnp.array(grid[0], dtype=jnp.int16),
| 1393 | -           jnp.array(grid[1], dtype=jnp.int16),
| 1394 | -           latents.shape[0],
| 1395 | -           latents.shape[1],
| 1396 | -       )
| 1397 | -
| 1398 |
| 1399 | def bandw_vid(vid, threshold):
| 1400-1402 | - … (removed lines truncated in the diff view)
| 1403 |
| 1404 | def mean_blur(vid, k):
| 1405-1408 | - … (removed lines truncated in the diff view)
| 1409 | -   smooth_vid = convolve(vid, window)
| 1410 | -   return smooth_vid
| 1411 | -
| 1412 |
| 1413 | def get_mask_pose(vid):
| 1414-1419 | - … (removed lines truncated in the diff view)
| 11 |   from PIL import Image
| 12 |   from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel
| 13 |   from einops import rearrange, repeat
| 14 | + from diffusers.models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel
| 15 |   from diffusers.schedulers import (
| 16 |       FlaxDDIMScheduler,
| 17 |       FlaxDPMSolverMultistepScheduler,
| 21 |   from diffusers.utils import PIL_INTERPOLATION, logging, replace_example_docstring
| 22 |   from diffusers.pipelines.pipeline_flax_utils import FlaxDiffusionPipeline
| 23 |   from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionPipelineOutput
| 24 | + from diffusers.pipelines.stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 25 |   logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
| 26 |   """
| 27 |   Text2Video-Zero:
| 28 |   - Inputs: Prompt, Pose Control via mp4/gif, First Frame (?)
| 29 |   - JAX implementation
| 30 |   - 3DUnet to replace 2DUnetConditional
| 31 |
| 32 | + """
| 33 |
| 34 |   def replicate_devices(array):
| 35 |       return jnp.expand_dims(array, 0).repeat(jax.device_count(), 0)
| 36 |
| 37 |
| 38 | + DEBUG = False  # Set to True to use python for loop instead of jax.fori_loop for easier debugging
| 39 |
| 40 |   EXAMPLE_DOC_STRING = """
| 41 |   Examples:
| 94 |       >>> output_images.save("generated_image.png")
| 95 |   ```
| 96 |   """
| 97 |   class FlaxTextToVideoPipeline(FlaxDiffusionPipeline):
| 98 |       def __init__(
| 99 |           self,
| 104 |          unet_vanilla,
| 105 |          controlnet,
| 106 |          scheduler: Union[
| 107 | +            FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
| 108 |          ],
| 109 |          safety_checker: FlaxStableDiffusionSafetyChecker,
| 110 |          feature_extractor: CLIPFeatureExtractor,
| 142 |          else:
| 143 |              eps = jax.random.normal(prng, x0.shape, dtype=text_embeddings.dtype)
| 144 |              alpha_vec = jnp.prod(params["scheduler"].common.alphas[t0:tMax])
| 145 | +            xt = jnp.sqrt(alpha_vec) * x0 + \
| 146 | +                jnp.sqrt(1-alpha_vec) * eps
| 147 |          return xt
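The `DDPM_forward` branch above jumps from a clean latent `x0` straight to a noisier timestep using the closed form of the forward process: with ᾱ the product of the per-step alphas being skipped, x_t = √ᾱ · x0 + √(1−ᾱ) · ε. A minimal standalone sketch of that same step, with a hypothetical `alphas` array standing in for the scheduler's real schedule:

```python
import jax
import jax.numpy as jnp

def ddpm_forward_jump(prng, x0, alphas, t0, t_max):
    # Cumulative product of the per-step alphas that are skipped in one jump.
    alpha_bar = jnp.prod(alphas[t0:t_max])
    eps = jax.random.normal(prng, x0.shape, dtype=x0.dtype)
    # Closed-form q(x_t | x_0): scale the clean latent and add matched noise.
    return jnp.sqrt(alpha_bar) * x0 + jnp.sqrt(1.0 - alpha_bar) * eps

# Toy usage with made-up numbers (not the pipeline's actual alpha schedule).
key = jax.random.PRNGKey(0)
alphas = jnp.linspace(0.999, 0.98, 1000)
x0 = jnp.zeros((1, 4, 1, 64, 64))          # b c f h w latent volume
xt = ddpm_forward_jump(key, x0, alphas, t0=881, t_max=941)
```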
| 148 | +
| 149 | +    def DDIM_backward(self, params, num_inference_steps, timesteps, skip_t, t0, t1, do_classifier_free_guidance, text_embeddings, latents_local,
| 150 | +                      guidance_scale, controlnet_image=None, controlnet_conditioning_scale=None):
| 151 | +        scheduler_state = self.scheduler.set_timesteps(params["scheduler"], num_inference_steps)
| 152 |          f = latents_local.shape[2]
| 153 |          latents_local = rearrange(latents_local, "b c f h w -> (b f) c h w")
| 154 |          latents = latents_local.copy()
| 155 |          x_t0_1 = None
| 156 |          x_t1_1 = None
| 157 | +        max_timestep = len(timesteps)-1
| 158 |          timesteps = jnp.array(timesteps)
| 159 |          def while_body(args):
| 160 |              step, latents, x_t0_1, x_t1_1, scheduler_state = args
| 161 |              t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
| 162 | +            latent_model_input = jnp.concatenate(
| 163 | +                [latents] * 2) if do_classifier_free_guidance else latents
| 164 |              latent_model_input = self.scheduler.scale_model_input(
| 165 |                  scheduler_state, latent_model_input, timestep=t
| 166 |              )
| 167 |              f = latents.shape[0]
| 168 | +            te = jnp.stack([text_embeddings[0, :, :]]*f + [text_embeddings[-1,:,:]]*f)
| 169 |              timestep = jnp.broadcast_to(t, latent_model_input.shape[0])
| 170 |              if controlnet_image is not None:
| 171 |                  down_block_res_samples, mid_block_res_sample = self.controlnet.apply(
| 192 |                  jnp.array(latent_model_input),
| 193 |                  jnp.array(timestep, dtype=jnp.int32),
| 194 |                  encoder_hidden_states=te,
| 195 | +            ).sample
| 196 |              # perform guidance
| 197 |              if do_classifier_free_guidance:
| 198 |                  noise_pred_uncond, noise_pred_text = jnp.split(noise_pred, 2, axis=0)
| 199 | +                noise_pred = noise_pred_uncond + guidance_scale * \
| 200 | +                    (noise_pred_text - noise_pred_uncond)
| 201 |              # compute the previous noisy sample x_t -> x_t-1
| 202 | +            latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
| 203 | +            x_t0_1 = jax.lax.select((step < max_timestep-1) & (timesteps[step+1] == t0), latents, x_t0_1)
| 204 | +            x_t1_1 = jax.lax.select((step < max_timestep-1) & (timesteps[step+1] == t1), latents, x_t1_1)
| 205 |              return (step + 1, latents, x_t0_1, x_t1_1, scheduler_state)
| 206 | +
| 207 |          latents_shape = latents.shape
| 208 |          x_t0_1, x_t1_1 = jnp.zeros(latents_shape), jnp.zeros(latents_shape)
| 209 |
| 210 |          def cond_fun(arg):
| 211 |              step, latents, x_t0_1, x_t1_1, scheduler_state = arg
| 212 |              return (step < skip_t) & (step < num_inference_steps)
| 213 | +
| 214 |          if DEBUG:
| 215 |              step = 0
| 216 |              while cond_fun((step, latents, x_t0_1, x_t1_1)):
| 217 | +                step, latents, x_t0_1, x_t1_1, scheduler_state = while_body((step, latents, x_t0_1, x_t1_1, scheduler_state))
| 218 |                  step = step + 1
| 219 |          else:
| 220 | +            _, latents, x_t0_1, x_t1_1, scheduler_state = jax.lax.while_loop(cond_fun, while_body, (0, latents, x_t0_1, x_t1_1, scheduler_state))
| 221 |          latents = rearrange(latents, "(b f) c h w -> b c f h w", f=f)
| 222 |          res = {"x0": latents.copy()}
| 223 |          if x_t0_1 is not None:
| 227 |              x_t1_1 = rearrange(x_t1_1, "(b f) c h w -> b c f h w", f=f)
| 228 |              res["x_t1_1"] = x_t1_1.copy()
| 229 |          return res
| 230 | +
| 231 |      def warp_latents_independently(self, latents, reference_flow):
| 232 |          _, _, H, W = reference_flow.shape
| 233 |          b, _, f, h, w = latents.shape
| 238 |          coords_t0 = coords_t0.at[:, 1].set(coords_t0[:, 1] * h / H)
| 239 |          f, c, _, _ = coords_t0.shape
| 240 |          coords_t0 = jax.image.resize(coords_t0, (f, c, h, w), "linear")
| 241 | +        coords_t0 = rearrange(coords_t0, 'f c h w -> f h w c')
| 242 | +        latents_0 = rearrange(latents[0], 'c f h w -> f c h w')
| 243 |          warped = grid_sample(latents_0, coords_t0, "mirror")
| 244 | +        warped = rearrange(warped, '(b f) c h w -> b c f h w', f=f)
| 245 |          return warped
| 246 |
| 247 |      def warp_vid_independently(self, vid, reference_flow):
| 253 |          coords_t0 = coords_t0.at[:, 1].set(coords_t0[:, 1] * h / H)
| 254 |          f, c, _, _ = coords_t0.shape
| 255 |          coords_t0 = jax.image.resize(coords_t0, (f, c, h, w), "linear")
| 256 | +        coords_t0 = rearrange(coords_t0, 'f c h w -> f h w c')
| 257 |          # latents_0 = rearrange(vid, 'c f h w -> f c h w')
| 258 |          warped = grid_sample(vid, coords_t0, "zeropad")
| 259 |          # warped = rearrange(warped, 'f c h w -> b c f h w', f=f)
| 260 |          return warped
| 261 | +
| 262 | +    def create_motion_field(self, motion_field_strength_x, motion_field_strength_y, frame_ids, video_length, latents):
| 263 | +        reference_flow = jnp.zeros(
| 264 | +            (video_length-1, 2, 512, 512), dtype=latents.dtype)
| 265 |          for fr_idx, frame_id in enumerate(frame_ids):
| 266 | +            reference_flow = reference_flow.at[fr_idx, 0, :,
| 267 | +                                               :].set(motion_field_strength_x*(frame_id))
| 268 | +            reference_flow = reference_flow.at[fr_idx, 1, :,
| 269 | +                                               :].set(motion_field_strength_y*(frame_id))
| 270 |          return reference_flow
| 271 | +
| 272 | +    def create_motion_field_and_warp_latents(self, motion_field_strength_x, motion_field_strength_y, frame_ids, video_length, latents):
| 273 | +        motion_field = self.create_motion_field(motion_field_strength_x=motion_field_strength_x,
| 274 | +                                                motion_field_strength_y=motion_field_strength_y, latents=latents, video_length=video_length, frame_ids=frame_ids)
| 275 |          for idx, latent in enumerate(latents):
| 276 | +            latents = latents.at[idx].set(self.warp_latents_independently(
| 277 | +                latent[None], motion_field)[0])
| 278 |          return motion_field, latents
| 279 |
| 280 | +    def text_to_video_zero(self, params,
| 281 | +                           prng,
| 282 | +                           text_embeddings,
| 283 | +                           video_length: Optional[int],
| 284 | +                           do_classifier_free_guidance = True,
| 285 | +                           height: Optional[int] = None,
| 286 | +                           width: Optional[int] = None,
| 287 | +                           num_inference_steps: int = 50,
| 288 | +                           guidance_scale: float = 7.5,
| 289 | +                           num_videos_per_prompt: Optional[int] = 1,
| 290 | +                           xT = None,
| 291 | +                           smooth_bg_strength: float = 0.,
| 292 | +                           motion_field_strength_x: float = 12,
| 293 | +                           motion_field_strength_y: float = 12,
| 294 | +                           t0: int = 44,
| 295 | +                           t1: int = 47,
| 296 | +                           controlnet_image=None,
| 297 | +                           controlnet_conditioning_scale=0,
| 298 | +                           ):
| 299 |          frame_ids = list(range(video_length))
| 300 |          # Prepare timesteps
| 301 | +        params["scheduler"] = self.scheduler.set_timesteps(params["scheduler"], num_inference_steps)
| 302 |          timesteps = params["scheduler"].timesteps
| 303 |          # Prepare latent variables
| 304 |          num_channels_latents = self.unet.in_channels
| 305 |          batch_size = 1
| 306 | +        xT = prepare_latents(params, prng, batch_size * num_videos_per_prompt, num_channels_latents, height, width, self.vae_scale_factor, xT)
| 307 |
| 308 | +        timesteps_ddpm = [981, 961, 941, 921, 901, 881, 861, 841, 821, 801, 781, 761, 741, 721,
| 309 | +                          701, 681, 661, 641, 621, 601, 581, 561, 541, 521, 501, 481, 461, 441,
| 310 | +                          421, 401, 381, 361, 341, 321, 301, 281, 261, 241, 221, 201, 181, 161,
| 311 | +                          141, 121, 101, 81, 61, 41, 21, 1]
| 312 |          timesteps_ddpm.reverse()
| 313 |          t0 = timesteps_ddpm[t0]
| 314 |          t1 = timesteps_ddpm[t1]
| 315 |          x_t1_1 = None
| 316 |
| 317 |          # Denoising loop
| 318 | +        shape = (batch_size, num_channels_latents, 1, height //
| 319 | +                 self.vae.scaling_factor, width // self.vae.scaling_factor)
| 320 |
| 321 |          # perform ∆t backward steps by stable diffusion
| 322 | +        ddim_res = self.DDIM_backward(params, num_inference_steps=num_inference_steps, timesteps=timesteps, skip_t=1000, t0=t0, t1=t1, do_classifier_free_guidance=do_classifier_free_guidance,
| 323 | +                                      text_embeddings=text_embeddings, latents_local=xT, guidance_scale=guidance_scale,
| 324 | +                                      controlnet_image=jnp.stack([controlnet_image[0]] * 2), controlnet_conditioning_scale=controlnet_conditioning_scale)
| 325 |          x0 = ddim_res["x0"]
| 326 |
| 327 |          # apply warping functions
| 329 |              x_t0_1 = ddim_res["x_t0_1"]
| 330 |          if "x_t1_1" in ddim_res:
| 331 |              x_t1_1 = ddim_res["x_t1_1"]
| 332 | +        x_t0_k = x_t0_1[:, :, :1, :, :].repeat(video_length-1, 2)
| 333 |          reference_flow, x_t0_k = self.create_motion_field_and_warp_latents(
| 334 | +            motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, latents=x_t0_k, video_length=video_length, frame_ids=frame_ids[1:])
| 335 |          # assuming t0=t1=1000, if t0 = 1000
| 336 |
| 337 |          # DDPM forward for more motion freedom
| 338 | +        ddpm_fwd = partial(self.DDPM_forward, params=params, prng=prng, x0=x_t0_k, t0=t0,
| 339 | +                           tMax=t1, shape=shape, text_embeddings=text_embeddings)
| 340 | +        x_t1_k = jax.lax.cond(t1 > t0,
| 341 | +                              ddpm_fwd,
| 342 | +                              lambda: x_t0_k
| 343 |          )
| 344 |          x_t1 = jnp.concatenate([x_t1_1, x_t1_k], axis=2)
| 345 |
| 346 |          # backward stepts by stable diffusion
| 347 |
| 348 | +        # warp the controlnet image following the same flow defined for latent
| 349 |          controlnet_video = controlnet_image[:video_length]
| 350 | +        controlnet_video = controlnet_video.at[1:].set(self.warp_vid_independently(controlnet_video[1:], reference_flow))
| 351 | +        controlnet_image = jnp.concatenate([controlnet_video]*2)
| 352 |          smooth_bg = True
| 353 |
| 354 |          if smooth_bg:
| 355 | +            # latent shape: "b c f h w"
| 356 | +            M_FG = repeat(get_mask_pose(controlnet_video), "f h w -> b c f h w", c=x_t1.shape[1], b=batch_size)
| 357 | +            initial_bg = repeat(x_t1[:,:,0] * (1 - M_FG[:,:,0]), "b c h w -> b c f h w", f=video_length-1)
| 358 | +            # warp the controlnet image following the same flow defined for latent  # f c h w
| 359 | +            initial_bg_warped = self.warp_latents_independently(initial_bg, reference_flow)
| 360 | +            bgs = x_t1[:,:,1:] * (1 - M_FG[:,:,1:])  # initial background
| 361 | +            initial_mask_warped = 1 - self.warp_latents_independently(repeat(M_FG[:,:,0], "b c h w -> b c f h w", f = video_length-1), reference_flow)
| 362 |              # initial_mask_warped = 1 - warp_vid_independently(repeat(M_FG[:,:,0], "b c h w -> (b f) c h w", f = video_length-1), reference_flow)
| 363 |              # initial_mask_warped = rearrange(initial_mask_warped, "(b f) c h w -> b c f h w", b=batch_size)
| 364 | +            mask = (1 - M_FG[:,:,1:]) * initial_mask_warped
| 365 | +            x_t1 = x_t1.at[:,:,1:].set( (1 - mask) * x_t1[:,:,1:] + mask * (initial_bg_warped * smooth_bg_strength + (1 - smooth_bg_strength) * bgs))
| 366 | +
| 367 | +        ddim_res = self.DDIM_backward(params, num_inference_steps=num_inference_steps, timesteps=timesteps, skip_t=t1, t0=-1, t1=-1, do_classifier_free_guidance=do_classifier_free_guidance,
| 368 | +                                      text_embeddings=text_embeddings, latents_local=x_t1, guidance_scale=guidance_scale,
| 369 | +                                      controlnet_image=controlnet_image, controlnet_conditioning_scale=controlnet_conditioning_scale,
| 370 | +                                      )
| 371 | +
| 372 |          x0 = ddim_res["x0"]
| 373 |          del ddim_res
| 374 |          del x_t1
| 376 |          del x_t1_k
| 377 |          return x0
| 378 |
| 379 | +    def denoise_latent(self, params, num_inference_steps, timesteps, do_classifier_free_guidance, text_embeddings, latents,
| 380 | +                       guidance_scale, controlnet_image=None, controlnet_conditioning_scale=None):
| 381 | +
| 382 | +        scheduler_state = self.scheduler.set_timesteps(params["scheduler"], num_inference_steps)
| 383 |          # f = latents_local.shape[2]
| 384 |          # latents_local = rearrange(latents_local, "b c f h w -> (b f) c h w")
| 385 |
| 386 | +        max_timestep = len(timesteps)-1
| 387 |          timesteps = jnp.array(timesteps)
| 388 |          def while_body(args):
| 389 |              step, latents, scheduler_state = args
| 390 |              t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
| 391 | +            latent_model_input = jnp.concatenate(
| 392 | +                [latents] * 2) if do_classifier_free_guidance else latents
| 393 |              latent_model_input = self.scheduler.scale_model_input(
| 394 |                  scheduler_state, latent_model_input, timestep=t
| 395 |              )
| 396 |              f = latents.shape[0]
| 397 | +            te = jnp.stack([text_embeddings[0, :, :]]*f + [text_embeddings[-1,:,:]]*f)
| 398 |              timestep = jnp.broadcast_to(t, latent_model_input.shape[0])
| 399 |              if controlnet_image is not None:
| 400 |                  down_block_res_samples, mid_block_res_sample = self.controlnet.apply(
| 421 |                  jnp.array(latent_model_input),
| 422 |                  jnp.array(timestep, dtype=jnp.int32),
| 423 |                  encoder_hidden_states=te,
| 424 | +            ).sample
| 425 |              # perform guidance
| 426 |              if do_classifier_free_guidance:
| 427 |                  noise_pred_uncond, noise_pred_text = jnp.split(noise_pred, 2, axis=0)
| 428 | +                noise_pred = noise_pred_uncond + guidance_scale * \
| 429 | +                    (noise_pred_text - noise_pred_uncond)
| 430 |              # compute the previous noisy sample x_t -> x_t-1
| 431 | +            latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
| 432 |              return (step + 1, latents, scheduler_state)
| 433 | +
| 434 |          def cond_fun(arg):
| 435 |              step, latents, scheduler_state = arg
| 436 | +            return (step < num_inference_steps)
| 437 | +
| 438 |          if DEBUG:
| 439 |              step = 0
| 440 |              while cond_fun((step, latents, scheduler_state)):
| 441 | +                step, latents, scheduler_state = while_body((step, latents, scheduler_state))
| 442 |                  step = step + 1
| 443 |          else:
| 444 | +            _, latents, scheduler_state = jax.lax.while_loop(cond_fun, while_body, (0, latents, scheduler_state))
| 445 |          # latents = rearrange(latents, "(b f) c h w -> b c f h w", f=f)
| 446 |          return latents
| 447 |
| 448 | +    def generate_starting_frames(self,
| 449 | +                                 params,
| 450 | +                                 prngs: list,  # list of prngs for each img
| 451 | +                                 prompt,
| 452 | +                                 neg_prompt,
| 453 | +                                 controlnet_image,
| 454 | +                                 do_classifier_free_guidance = True,
| 455 | +                                 num_inference_steps: int = 50,
| 456 | +                                 guidance_scale: float = 7.5,
| 457 | +                                 t0: int = 44,
| 458 | +                                 t1: int = 47,
| 459 | +                                 controlnet_conditioning_scale=1.,
| 460 | +                                 ):
| 461 |
| 462 |          height, width = controlnet_image.shape[-2:]
| 463 |          if height % 64 != 0 or width % 64 != 0:
| 464 | +            raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.")
| 465 |
| 466 | +        shape = (self.unet.in_channels, height //
| 467 | +                 self.vae_scale_factor, width // self.vae_scale_factor)  # c h w
| 468 |          # scale the initial noise by the standard deviation required by the scheduler
| 469 |
| 470 | +        # print(f"Generating {len(prngs)} first frames with prompt {prompt}, for {num_inference_steps} steps. PRNG seeds are: {prngs}")
| 471 |
| 472 | +        latents = jnp.stack([jax.random.normal(prng, shape) for prng in prngs])  # b c h w
| 473 |          latents = latents * params["scheduler"].init_noise_sigma
| 474 |
| 475 |          timesteps = params["scheduler"].timesteps
| 476 | +        timesteps_ddpm = [981, 961, 941, 921, 901, 881, 861, 841, 821, 801, 781, 761, 741, 721,
| 477 | +                          701, 681, 661, 641, 621, 601, 581, 561, 541, 521, 501, 481, 461, 441,
| 478 | +                          421, 401, 381, 361, 341, 321, 301, 281, 261, 241, 221, 201, 181, 161,
| 479 | +                          141, 121, 101, 81, 61, 41, 21, 1]
| 480 |          timesteps_ddpm.reverse()
| 481 |          t0 = timesteps_ddpm[t0]
| 482 |          t1 = timesteps_ddpm[t1]
| 483 |
| 484 |          # get prompt text embeddings
| 485 | +        prompt_ids = shard(self.prepare_text_inputs(prompt))
| 486 | +
| 487 | +        # prompt_embeds = jax.pmap( lambda prompt_ids, params: )(prompt_ids, params)
| 488 | +
| 489 | +        @jax.pmap
| 490 | +        def prepare_text(params, prompt_ids, uncond_input):
| 491 | +            prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]
| 492 | +            negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
| 493 | +            text_embeddings = jnp.concatenate([negative_prompt_embeds, prompt_embeds])
| 494 | +            return text_embeddings
| 495 |
| 496 |          # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
| 497 |          # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
| 498 |          batch_size = 1
| 499 |          max_length = prompt_ids.shape[-1]
| 500 |          if neg_prompt is None:
| 501 | +            uncond_input = shard(self.tokenizer(
| 502 | +                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
| 503 | +            ).input_ids)
| 504 |          else:
| 505 |              neg_prompt_ids = self.prepare_text_inputs(neg_prompt)
| 506 | +            uncond_input = shard(neg_prompt_ids)
| 507 |
| 508 | +        text_embeddings = prepare_text(params, prompt_ids, uncond_input)
| 509 | +
| 510 | +        controlnet_image = shard(jnp.stack([controlnet_image[0]] * len(prngs) * 2))
| 511 | +
| 512 | +        timesteps = shard(jnp.array(timesteps))
| 513 | +        guidance_scale = shard(jnp.array(guidance_scale))
| 514 | +        controlnet_conditioning_scale = shard(jnp.array(controlnet_conditioning_scale))
| 515 | +
| 516 | +        # latent is shape b c h w
| 517 | +        # vmap_gen_start_frame = jax.vmap(lambda latent: p_generate_starting_frames(self, num_inference_steps, params, timesteps, text_embeddings, shard(latent[None]), guidance_scale, controlnet_image, controlnet_conditioning_scale))
| 518 | +        # decoded_latents = vmap_gen_start_frame(latents)
| 519 | +        decoded_latents = p_generate_starting_frames(self, num_inference_steps, params, timesteps, text_embeddings, shard(latents), guidance_scale, controlnet_image, controlnet_conditioning_scale)
| 520 | +        # print(f"shape output: {decoded_latents.shape}")
| 521 | +        return unshard(decoded_latents)  # [:, 0]
| 522 |
|
| 524 |
self,
|
|
|
|
| 533 |
controlnet_conditioning_scale: Union[float, jnp.array] = 1.0,
|
| 534 |
return_dict: bool = True,
|
| 535 |
jit: bool = False,
|
| 536 |
+
xT = None,
|
| 537 |
+
smooth_bg_strength: float=0.,
|
| 538 |
motion_field_strength_x: float = 3,
|
| 539 |
motion_field_strength_y: float = 4,
|
| 540 |
t0: int = 44,
|
|
|
|
| 600 |
if isinstance(controlnet_conditioning_scale, float):
|
| 601 |
# Convert to a tensor so each device gets a copy. Follow the prompt_ids for
|
| 602 |
# shape information, as they may be sharded (when `jit` is `True`), or not.
|
| 603 |
+
controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0])
|
|
|
|
|
|
|
| 604 |
if len(prompt_ids.shape) > 2:
|
| 605 |
# Assume sharded
|
| 606 |
controlnet_conditioning_scale = controlnet_conditioning_scale[:, None]
|
|
|
|
| 614 |
num_inference_steps,
|
| 615 |
replicate_devices(guidance_scale),
|
| 616 |
replicate_devices(latents) if latents is not None else None,
|
| 617 |
+
replicate_devices(neg_prompt_ids) if neg_prompt_ids is not None else None,
|
|
|
|
|
|
|
| 618 |
replicate_devices(controlnet_conditioning_scale),
|
| 619 |
replicate_devices(xT) if xT is not None else None,
|
| 620 |
replicate_devices(smooth_bg_strength),
|
|
|
|
| 645 |
safety_params = params["safety_checker"]
|
| 646 |
images_uint8_casted = (images * 255).round().astype("uint8")
|
| 647 |
num_devices, batch_size = images.shape[:2]
|
| 648 |
+
images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
|
| 649 |
+
images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 650 |
images = np.asarray(images)
|
| 651 |
# block images
|
| 652 |
if any(has_nsfw_concept):
|
|
|
|
| 659 |
has_nsfw_concept = False
|
| 660 |
if not return_dict:
|
| 661 |
return (images, has_nsfw_concept)
|
| 662 |
+
return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
|
|
|
|
|
|
|
| 663 |
|
| 664 |
def prepare_text_inputs(self, prompt: Union[str, List[str]]):
|
| 665 |
if not isinstance(prompt, (str, list)):
|
| 666 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
|
|
|
|
|
|
| 667 |
text_input = self.tokenizer(
|
| 668 |
prompt,
|
| 669 |
padding="max_length",
|
|
|
|
| 672 |
return_tensors="np",
|
| 673 |
)
|
| 674 |
return text_input.input_ids
|
|
|
|
| 675 |
def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]):
|
| 676 |
if not isinstance(image, (Image.Image, list)):
|
| 677 |
+
raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}")
|
|
|
|
|
|
|
| 678 |
if isinstance(image, Image.Image):
|
| 679 |
image = [image]
|
| 680 |
+
processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image])
|
|
|
|
|
|
|
| 681 |
return processed_images
|
|
|
|
| 682 |
def _get_has_nsfw_concepts(self, features, params):
|
| 683 |
has_nsfw_concepts = self.safety_checker(features, params)
|
| 684 |
return has_nsfw_concepts
|
|
|
|
| 685 |
def _run_safety_checker(self, images, safety_model_params, jit=False):
|
| 686 |
# safety_model_params should already be replicated when jit is True
|
| 687 |
pil_images = [Image.fromarray(image) for image in images]
|
| 688 |
features = self.feature_extractor(pil_images, return_tensors="np").pixel_values
|
| 689 |
if jit:
|
| 690 |
features = shard(features)
|
| 691 |
+
has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
|
|
|
|
|
|
|
| 692 |
has_nsfw_concepts = unshard(has_nsfw_concepts)
|
| 693 |
safety_model_params = unreplicate(safety_model_params)
|
| 694 |
else:
|
| 695 |
+
has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)
|
|
|
|
|
|
|
| 696 |
images_was_copied = False
|
| 697 |
for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
|
| 698 |
if has_nsfw_concept:
|
|
|
|
| 706 |
" instead. Try again with a different prompt and/or seed."
|
| 707 |
)
|
| 708 |
return images, has_nsfw_concepts
|
|
|
|
| 709 |
def _generate(
|
| 710 |
self,
|
| 711 |
prompt_ids: jnp.array,
|
|
|
|
| 717 |
latents: Optional[jnp.array] = None,
|
| 718 |
neg_prompt_ids: Optional[jnp.array] = None,
|
| 719 |
controlnet_conditioning_scale: float = 1.0,
|
| 720 |
+
xT = None,
|
| 721 |
+
smooth_bg_strength: float = 0.,
|
| 722 |
motion_field_strength_x: float = 12,
|
| 723 |
motion_field_strength_y: float = 12,
|
| 724 |
t0: int = 44,
|
|
|
|
| 727 |
height, width = image.shape[-2:]
|
| 728 |
video_length = image.shape[0]
|
| 729 |
if height % 64 != 0 or width % 64 != 0:
|
| 730 |
+
raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.")
|
|
|
|
|
|
|
| 731 |
# get prompt text embeddings
|
| 732 |
prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]
|
| 733 |
# TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
|
|
|
|
| 736 |
max_length = prompt_ids.shape[-1]
|
| 737 |
if neg_prompt_ids is None:
|
| 738 |
uncond_input = self.tokenizer(
|
| 739 |
+
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
|
|
|
|
|
|
|
|
|
|
| 740 |
).input_ids
|
| 741 |
else:
|
| 742 |
uncond_input = neg_prompt_ids
|
| 743 |
+
negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
|
|
|
|
|
|
|
| 744 |
context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])
|
| 745 |
image = jnp.concatenate([image] * 2)
|
| 746 |
seed_t2vz, prng_seed = jax.random.split(prng_seed)
|
| 747 |
+
#get the latent following text to video zero
|
| 748 |
+
latents = self.text_to_video_zero(params, seed_t2vz, text_embeddings=context, video_length=video_length,
|
| 749 |
+
height=height, width = width, num_inference_steps=num_inference_steps,
|
| 750 |
+
guidance_scale=guidance_scale, controlnet_image=image,
|
| 751 |
+
xT=xT, smooth_bg_strength=smooth_bg_strength, t0=t0, t1=t1,
|
| 752 |
+
motion_field_strength_x=motion_field_strength_x,
|
| 753 |
+
motion_field_strength_y=motion_field_strength_y,
|
| 754 |
+
controlnet_conditioning_scale=controlnet_conditioning_scale
|
| 755 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 756 |
# scale and decode the image latents with vae
|
| 757 |
latents = 1 / self.vae.config.scaling_factor * latents
|
| 758 |
latents = rearrange(latents, "b c f h w -> (b f) c h w")
|
| 759 |
+
video = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample
|
|
|
|
|
|
|
| 760 |
video = (video / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
|
| 761 |
return video
|
| 762 |
+
|
| 763 |
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 764 |
def __call__(
|
| 765 |
self,
|
|
|
|
| 774 |
controlnet_conditioning_scale: Union[float, jnp.array] = 1.0,
|
| 775 |
return_dict: bool = True,
|
| 776 |
jit: bool = False,
|
| 777 |
+
xT = None,
|
| 778 |
+
smooth_bg_strength: float = 0.,
|
| 779 |
motion_field_strength_x: float = 3,
|
| 780 |
motion_field_strength_y: float = 4,
|
| 781 |
t0: int = 44,
|
|
|
|
| 832 |
if isinstance(controlnet_conditioning_scale, float):
|
| 833 |
# Convert to a tensor so each device gets a copy. Follow the prompt_ids for
|
| 834 |
# shape information, as they may be sharded (when `jit` is `True`), or not.
|
| 835 |
+
controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0])
|
|
|
|
|
|
|
| 836 |
if len(prompt_ids.shape) > 2:
|
| 837 |
# Assume sharded
|
| 838 |
controlnet_conditioning_scale = controlnet_conditioning_scale[:, None]
|
|
|
|
| 877 |
safety_params = params["safety_checker"]
|
| 878 |
images_uint8_casted = (images * 255).round().astype("uint8")
|
| 879 |
num_devices, batch_size = images.shape[:2]
|
| 880 |
+
images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
|
| 881 |
+
images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 882 |
images = np.asarray(images)
|
| 883 |
# block images
|
| 884 |
if any(has_nsfw_concept):
|
|
|
|
| 891 |
has_nsfw_concept = False
|
| 892 |
if not return_dict:
|
| 893 |
return (images, has_nsfw_concept)
|
| 894 |
+
return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
|
|
|
|
|
|
|
| 895 |
|
| 896 |
|
| 897 |
# Static argnums are pipe, num_inference_steps. A change would trigger recompilation.
|
|
|
|
| 899 |
@partial(
|
| 900 |
jax.pmap,
|
| 901 |
in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0, 0, 0, 0, 0, None, None),
|
| 902 |
+
static_broadcasted_argnums=(0, 5, 14, 15)
|
| 903 |
)
|
| 904 |
def _p_generate(
|
| 905 |
pipe,
|
| 906 |
+
prompt_ids,
|
| 907 |
image,
|
| 908 |
params,
|
| 909 |
prng_seed,
|
|
|
|
| 936 |
t0,
|
| 937 |
t1,
|
| 938 |
)
|
|
|
|
|
|
|
| 939 |
@partial(jax.pmap, static_broadcasted_argnums=(0,))
|
| 940 |
def _p_get_has_nsfw_concepts(pipe, features, params):
|
| 941 |
return pipe._get_has_nsfw_concepts(features, params)
|
| 942 |
|
| 943 |
+
@partial(
|
| 944 |
+
jax.pmap,
|
| 945 |
+
in_axes=(None, None, 0, 0, 0, 0, 0, 0, 0),
|
| 946 |
+
static_broadcasted_argnums=(0, 1)
|
| 947 |
+
)
|
| 948 |
+
def p_generate_starting_frames(pipe, num_inference_steps, params, timesteps, text_embeddings, latents, guidance_scale, controlnet_image, controlnet_conditioning_scale):
|
| 949 |
+
# perform ∆t backward steps by stable diffusion
|
| 950 |
+
# delta_t_diffusion = jax.vmap(lambda latent : self.DDIM_backward(params, num_inference_steps=num_inference_steps, timesteps=timesteps, skip_t=1000, t0=t0, t1=t1, do_classifier_free_guidance=do_classifier_free_guidance,
|
| 951 |
+
# text_embeddings=text_embeddings, latents_local=latent, guidance_scale=guidance_scale,
|
| 952 |
+
# controlnet_image=controlnet_image, controlnet_conditioning_scale=controlnet_conditioning_scale))
|
| 953 |
+
# ddim_res = delta_t_diffusion(latents)
|
| 954 |
+
# latents = ddim_res["x0"] #output is i b c f h w
|
| 955 |
+
|
| 956 |
+
# DDPM forward for more motion freedom
|
| 957 |
+
# ddpm_fwd = jax.vmap(lambda prng, latent: self.DDPM_forward(params=params, prng=prng, x0=latent, t0=t0,
|
| 958 |
+
# tMax=t1, shape=shape, text_embeddings=text_embeddings))
|
| 959 |
+
# latents = ddpm_fwd(stacked_prngs, latents)
|
| 960 |
+
# main backward diffusion
|
| 961 |
+
# denoise_first_frame = lambda latent : self.DDIM_backward(params, num_inference_steps=num_inference_steps, timesteps=timesteps, skip_t=100000, t0=-1, t1=-1, do_classifier_free_guidance=do_classifier_free_guidance,
|
| 962 |
+
# text_embeddings=text_embeddings, latents_local=latent, guidance_scale=guidance_scale,
|
| 963 |
+
# controlnet_image=controlnet_image, controlnet_conditioning_scale=controlnet_conditioning_scale)
|
| 964 |
+
# latents = rearrange(latents, 'i b c f h w -> (i b) c f h w')
|
| 965 |
+
# ddim_res = denoise_first_frame(latents)
|
| 966 |
+
latents = pipe.denoise_latent(params, num_inference_steps=num_inference_steps, timesteps=timesteps, do_classifier_free_guidance=True,
|
| 967 |
+
text_embeddings=text_embeddings, latents=latents, guidance_scale=guidance_scale,
|
| 968 |
+
controlnet_image=controlnet_image, controlnet_conditioning_scale=controlnet_conditioning_scale)
|
| 969 |
+
# latents = rearrange(ddim_res["x0"], 'i b c f h w -> (i b) c f h w') #output is i b c f h w
|
| 970 |
+
|
| 971 |
+
# scale and decode the image latents with vae
|
| 972 |
+
latents = 1 / pipe.vae.config.scaling_factor * latents
|
| 973 |
+
# latents = rearrange(latents, "b c h w -> (b f) c h w")
|
| 974 |
+
imgs = pipe.vae.apply({"params": params["vae"]}, latents, method=pipe.vae.decode).sample
|
| 975 |
+
imgs = (imgs / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
|
| 976 |
+
return imgs
|
| 977 |
+
|
| 978 |
+
|
| 979 |
|
| 980 |
def unshard(x: jnp.ndarray):
|
| 981 |
# einops.rearrange(x, 'd b ... -> (d b) ...')
|
| 982 |
num_devices, batch_size = x.shape[:2]
|
| 983 |
rest = x.shape[2:]
|
| 984 |
return x.reshape(num_devices * batch_size, *rest)
|
|
|
|
|
|
|
| 985 |
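`unshard` above is the inverse of flax's `shard`, which splits the leading batch axis across local devices before a `pmap` call. A small standalone sketch of the round trip (shapes assume the global batch is divisible by `jax.device_count()`):

```python
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

def unshard(x: jnp.ndarray):
    # (devices, per_device_batch, ...) -> (devices * per_device_batch, ...)
    num_devices, batch_size = x.shape[:2]
    return x.reshape(num_devices * batch_size, *x.shape[2:])

batch = jnp.zeros((jax.device_count() * 2, 4, 64, 64))  # global batch of latents
sharded = shard(batch)        # adds a leading device axis: (devices, 2, 4, 64, 64)
restored = unshard(sharded)   # back to the flat global batch
assert restored.shape == batch.shape
```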
| 985 | def preprocess(image, dtype):
| 986 |     image = image.convert("RGB")
| 987 |     w, h = image.size
| 991 |     image = image[None].transpose(0, 3, 1, 2)
| 992 |     return image
| 993 |
| 994 | + def prepare_latents(params, prng, batch_size, num_channels_latents, height, width, vae_scale_factor, latents=None):
| 995 | +     shape = (batch_size, num_channels_latents, 1, height //
| 996 | +              vae_scale_factor, width // vae_scale_factor)  # b c f h w
| 997 |       # scale the initial noise by the standard deviation required by the scheduler
| 998 |       if latents is None:
| 999 |           latents = jax.random.normal(prng, shape)
| 1000 |      latents = latents * params["scheduler"].init_noise_sigma
| 1001 |      return latents
| 1002 |
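`prepare_latents` allocates a single-frame latent volume that is later repeated over time. A quick shape check under the usual Stable Diffusion conventions (4 latent channels, VAE scale factor 8); the numbers here are purely illustrative:

```python
import jax
import jax.numpy as jnp

batch_size, num_channels_latents, vae_scale_factor = 1, 4, 8
height = width = 512

shape = (batch_size, num_channels_latents, 1,
         height // vae_scale_factor, width // vae_scale_factor)  # b c f h w
latents = jax.random.normal(jax.random.PRNGKey(0), shape)
print(latents.shape)  # (1, 4, 1, 64, 64): one noise frame, repeated later along f
```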
| 1003 | def coords_grid(batch, ht, wd):
| 1004 |     coords = jnp.meshgrid(jnp.arange(ht), jnp.arange(wd), indexing="ij")
| 1005 |     coords = jnp.stack(coords[::-1], axis=0)
| 1006 |     return coords[None].repeat(batch, 0)
| 1007 |
| 1008 | def adapt_pos_mirror(x, y, W, H):
| 1009 | +   # adapt the position, with mirror padding
| 1010 | +   x_w_mirror = ((x + W - 1) % (2*(W - 1))) - W + 1
| 1011 | +   x_adapted = jnp.where(x_w_mirror > 0, x_w_mirror, - (x_w_mirror))
| 1012 | +   y_w_mirror = ((y + H - 1) % (2*(H - 1))) - H + 1
| 1013 | +   y_adapted = jnp.where(y_w_mirror > 0, y_w_mirror, - (y_w_mirror))
| 1014 | +   return y_adapted, x_adapted
| 1015 |
| 1016 | + def safe_get_zeropad(img, x, y, W, H):
| 1017 | +     return jnp.where((x < W) & (x > 0) & (y < H) & (y > 0), img[y, x], 0.)
| 1018 |
| 1019 | + def safe_get_mirror(img, x, y, W, H):
| 1020 | +     return img[adapt_pos_mirror(x, y, W, H)]
| 1021 |
| 1022 | @partial(jax.vmap, in_axes=(0, 0, None))
| 1023 | @partial(jax.vmap, in_axes=(0, None, None))
| 1024 | + @partial(jax.vmap, in_axes=(None, 0, None))
| 1025 | @partial(jax.vmap, in_axes=(None, 0, None))
| 1026 | def grid_sample(latents, grid, method):
| 1027 |     # this is an alternative to torch.functional.nn.grid_sample in jax
| 1028 |     # this implementation is following the algorithm described @ https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
| 1029 |     # but with coordinates scaled to the size of the image
| 1030 |     if method == "mirror":
| 1031 | +       return safe_get_mirror(latents, jnp.array(grid[0], dtype=jnp.int16), jnp.array(grid[1], dtype=jnp.int16), latents.shape[0], latents.shape[1])
| 1032 | +   else:  # default is zero padding
| 1033 | +       return safe_get_zeropad(latents, jnp.array(grid[0], dtype=jnp.int16), jnp.array(grid[1], dtype=jnp.int16), latents.shape[0], latents.shape[1])
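A small usage sketch of the nested-vmap `grid_sample` defined above (it assumes `grid_sample`, `safe_get_mirror` and friends from this file are in scope). Each frame's latent is indexed at integer grid coordinates, with mirrored or zero padding at the borders; the identity grid below reproduces the input for square latents:

```python
import jax.numpy as jnp

# Toy latent video: f frames, c channels, h x w spatial grid.
f, c, h, w = 2, 4, 8, 8
latents = jnp.arange(f * c * h * w, dtype=jnp.float32).reshape(f, c, h, w)

# Per-pixel sampling grid of shape (f, h, w, 2); channel 0 is the x (column)
# coordinate and channel 1 the y (row) coordinate, matching coords_grid's layout.
rows, cols = jnp.meshgrid(jnp.arange(h), jnp.arange(w), indexing="ij")
identity = jnp.stack([cols, rows], axis=-1)
grid = jnp.broadcast_to(identity, (f, h, w, 2))

warped = grid_sample(latents, grid, "mirror")   # f c h w
# For an in-range identity grid on square latents the output equals the input;
# out-of-range coordinates are reflected ("mirror") or zeroed ("zeropad").
```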
| 1034 |
| 1035 | def bandw_vid(vid, threshold):
| 1036 | +   vid = jnp.max(vid, axis=1)
| 1037 | +   return jnp.where(vid > threshold, 1, 0)
| 1038 |
| 1039 | def mean_blur(vid, k):
| 1040 | +   window = jnp.ones((vid.shape[0], k, k)) / (k*k)
| 1041 | +   convolve = jax.vmap(lambda img, kernel: jax.scipy.signal.convolve(img, kernel, mode='same'))
| 1042 | +   smooth_vid = convolve(vid, window)
| 1043 | +   return smooth_vid
| 1044 |
| 1045 | def get_mask_pose(vid):
| 1046 | +   vid = bandw_vid(vid, 0.4)
| 1047 | +   l, h, w = vid.shape
| 1048 | +   vid = jax.image.resize(vid, (l, h//8, w//8), "nearest")
| 1049 | +   vid = bandw_vid(mean_blur(vid, 7)[:, None], threshold=0.01)
| 1050 | +   return vid/(jnp.max(vid) + 1e-4)
| 1051 | +   # return jax.image.resize(vid/(jnp.max(vid) + 1e-4), (l, h, w), "nearest")
utils/gradio_utils.py
CHANGED
|
@@ -3,11 +3,15 @@ import os
| 3 |  # App Pose utils
| 4 |  def motion_to_video_path(motion):
| 5 |      videos = [
| 6 |          "__assets__/dance1_corr.mp4",
| 7 |          "__assets__/dance2_corr.mp4",
| 8 |          "__assets__/dance3_corr.mp4",
| 9 |          "__assets__/dance4_corr.mp4",
| 10 | -        "__assets__/dance5_corr.mp4"
| 11 |      ]
| 12 |      if len(motion.split(" ")) > 1 and motion.split(" ")[1].isnumeric():
| 13 |          id = int(motion.split(" ")[1]) - 1

| 3 |  # App Pose utils
| 4 |  def motion_to_video_path(motion):
| 5 |      videos = [
| 6 | +        "__assets__/walk_01.mp4",
| 7 | +        "__assets__/walk_02.mp4",
| 8 | +        "__assets__/walk_03.mp4",
| 9 | +        "__assets__/run.mp4",
| 10 |         "__assets__/dance1_corr.mp4",
| 11 |         "__assets__/dance2_corr.mp4",
| 12 |         "__assets__/dance3_corr.mp4",
| 13 |         "__assets__/dance4_corr.mp4",
| 14 | +       "__assets__/dance5_corr.mp4",
| 15 |      ]
| 16 |      if len(motion.split(" ")) > 1 and motion.split(" ")[1].isnumeric():
| 17 |          id = int(motion.split(" ")[1]) - 1
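The gallery emits selection labels ("Motion 1", "Motion 2", …) which this helper maps back to file paths by index. A quick standalone illustration of the parsing rule, with a stand-in list rather than the real asset paths:

```python
videos = ["walk_01.mp4", "walk_02.mp4", "walk_03.mp4", "run.mp4"]

def motion_to_index(motion: str) -> int:
    parts = motion.split(" ")
    # "Motion 3" -> index 2; anything unparsable falls back to the first entry.
    if len(parts) > 1 and parts[1].isnumeric():
        return int(parts[1]) - 1
    return 0

assert videos[motion_to_index("Motion 3")] == "walk_03.mp4"
```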
webui/app_control_animation.py
CHANGED
|
@@ -6,23 +6,35 @@ from utils.hf_utils import get_model_list
| 6 |  huggingspace_name = os.environ.get("SPACE_AUTHOR_NAME")
| 7 |  on_huggingspace = huggingspace_name if huggingspace_name is not None else False
| 8 |
| 9 | -  examples = [
| 10-20 | - … (removed lines truncated in the diff view)
| 21 |
| 22 |  def on_video_path_update(evt: gr.EventData):
| 23 |      return f"Selection: **{evt._data}**"
| 24 |
| 25 | -
| 26 |  def pose_gallery_callback(evt: gr.SelectData):
| 27 |      return f"Motion {evt.index+1}"
| 28 |

@@ -134,31 +146,37 @@ def create_demo(model: ControlAnimationModel):
| 134 |                     gallery_pose_sequence = gr.Gallery(
| 135 |                         label="Pose Sequence",
| 136 |                         value=[
| 137 | -                           ("__assets__/… (truncated)
| 138 | -                           ("__assets__/… (truncated)
| 139 | -                           ("__assets__/… (truncated)
| 140 | -                           ("__assets__/… (truncated)
| 141 | -                           ("__assets__/… (truncated)
| 142 |                         ],
| 143 | -                   ).style(… (truncated)
| 144 |                     input_video_path = gr.Textbox(
| 145 |                         label="Pose Sequence", visible=False, value="Motion 1"
| 146 |                     )
| 147 |                     pose_sequence_selector = gr.Markdown("Pose Sequence: **Motion 1**")
| 148 |
| 149 | -               with gr.… (truncated)
| 150-152 | - … (removed lines truncated in the diff view)
| 153 |
| 154-158 | - … (removed lines truncated in the diff view)
| 159 |
| 160-161 | - … (removed lines truncated in the diff view)
| 162 |
| 163 |                 with gr.Box(visible=False):
| 164 |                     initial_frame_index = gr.Number(

@@ -191,17 +209,17 @@ def create_demo(model: ControlAnimationModel):
| 191 |                 seed,
| 192 |             ]
| 193 |
| 194 | -           def submit_select(initial_frame_index: int):
| 195-199 | - … (removed lines truncated in the diff view)
| 200 |
| 201-204 | - … (removed lines truncated in the diff view)
| 205 |
| 206 |             gen_frames_button.click(
| 207 |                 fn=model.generate_initial_frames,

@@ -209,12 +227,18 @@ def create_demo(model: ControlAnimationModel):
| 209 |                 outputs=initial_frames,
| 210 |             )
| 211 |
| 212 |             gen_animation_button.click(
| 213 | -               fn=… (truncated)
| 214 | -               inputs=initial_frame_index,
| 215 | -               outputs=[frame_selection_view, animation_view],
| 216 | -           ).then(
| 217 | -               fn=None,
| 218 |                 inputs=animation_inputs,
| 219 |                 outputs=result,
| 220 |             )

@@ -227,4 +251,12 @@ def create_demo(model: ControlAnimationModel):
| 227 |     #     cache_examples=on_huggingspace,
| 228 |     # )
| 229 |
| 230 |     return demo
| 6 |  huggingspace_name = os.environ.get("SPACE_AUTHOR_NAME")
| 7 |  on_huggingspace = huggingspace_name if huggingspace_name is not None else False
| 8 |
| 9 | +  examples = [["A surfer in miami walking by the beach",
| 10 | +              None,
| 11 | +              "Motion 3",
| 12 | +              None,
| 13 | +              3,
| 14 | +              0,
| 15 | +              None,
| 16 | +              None,
| 17 | +              None,
| 18 | +              None,
| 19 | +              None,
| 20 | +              None,
| 21 | +              0],
| 22 | +             ]
| 23 | +  # examples = [
| 24 | +  #     ["an astronaut waving the arm on the moon"],
| 25 | +  #     ["a sloth surfing on a wakeboard"],
| 26 | +  #     ["an astronaut walking on a street"],
| 27 | +  #     ["a cute cat walking on grass"],
| 28 | +  #     ["a horse is galloping on a street"],
| 29 | +  #     ["an astronaut is skiing down the hill"],
| 30 | +  #     ["a gorilla walking alone down the street"],
| 31 | +  #     ["a gorilla dancing on times square"],
| 32 | +  #     ["A panda dancing dancing like crazy on Times Square"],
| 33 | +  # ]
| 34 |
| 35 |  def on_video_path_update(evt: gr.EventData):
| 36 |      return f"Selection: **{evt._data}**"
| 37 |
| 38 |  def pose_gallery_callback(evt: gr.SelectData):
| 39 |      return f"Motion {evt.index+1}"
| 40 |
| 146 |                     gallery_pose_sequence = gr.Gallery(
| 147 |                         label="Pose Sequence",
| 148 |                         value=[
| 149 | +                           ("__assets__/walk_01.gif", "Motion 1"),
| 150 | +                           ("__assets__/walk_02.gif", "Motion 2"),
| 151 | +                           ("__assets__/walk_03.gif", "Motion 3"),
| 152 | +                           ("__assets__/run.gif", "Motion 4"),
| 153 | +                           ("__assets__/dance1.gif", "Motion 5"),
| 154 | +                           ("__assets__/dance2.gif", "Motion 6"),
| 155 | +                           ("__assets__/dance3.gif", "Motion 7"),
| 156 | +                           ("__assets__/dance4.gif", "Motion 8"),
| 157 | +                           ("__assets__/dance5.gif", "Motion 9"),
| 158 |                         ],
| 159 | +                   ).style(columns=3)
| 160 |                     input_video_path = gr.Textbox(
| 161 |                         label="Pose Sequence", visible=False, value="Motion 1"
| 162 |                     )
| 163 |                     pose_sequence_selector = gr.Markdown("Pose Sequence: **Motion 1**")
| 164 |
| 165 | +               with gr.Row():
| 166 | +                   with gr.Column(visible=True) as frame_selection_view:
| 167 | +                       initial_frames = gr.Gallery(
| 168 | +                           label="Initial Frames", show_label=False
| 169 | +                       ).style(columns=4, rows=1, object_fit="contain", preview=True)
| 170 |
| 171 | +                       gr.Markdown("Select an initial frame to start your animation with.")
| 172 | +
| 173 | +                       gen_animation_button = gr.Button(
| 174 | +                           value="Select Initial Frame & Generate Animation",
| 175 | +                           variant="secondary",
| 176 | +                       )
| 177 |
| 178 | +               with gr.Column(visible=True) as animation_view:
| 179 | +                   result = gr.Image(label="Generated Video")
| 180 |
| 181 |                 with gr.Box(visible=False):
| 182 |                     initial_frame_index = gr.Number(
| 209 |                 seed,
| 210 |             ]
| 211 |
| 212 | +           # def submit_select(initial_frame_index: int):
| 213 | +           #     if initial_frame_index != -1: # More to next step
| 214 | +           #         return {
| 215 | +           #             frame_selection_view: gr.update(visible=False),
| 216 | +           #             animation_view: gr.update(visible=True),
| 217 | +           #         }
| 218 |
| 219 | +           #     return {
| 220 | +           #         frame_selection_view: gr.update(visible=True),
| 221 | +           #         animation_view: gr.update(visible=False),
| 222 | +           #     }
| 223 |
| 224 |             gen_frames_button.click(
| 225 |                 fn=model.generate_initial_frames,
| 227 |                 outputs=initial_frames,
| 228 |             )
| 229 |
| 230 | +           # gen_animation_button.click(
| 231 | +           #     fn=submit_select,
| 232 | +           #     inputs=initial_frame_index,
| 233 | +           #     outputs=[frame_selection_view, animation_view],
| 234 | +           # ).then(
| 235 | +           #     fn=model.generate_animation,
| 236 | +           #     inputs=animation_inputs,
| 237 | +           #     outputs=result,
| 238 | +           # )
| 239 | +
| 240 |             gen_animation_button.click(
| 241 | +               fn=model.generate_animation,
| 242 |                 inputs=animation_inputs,
| 243 |                 outputs=result,
| 244 |             )
| 251 |     #     cache_examples=on_huggingspace,
| 252 |     # )
| 253 |
| 254 | +   gr.Examples(examples=examples,
| 255 | +               inputs=animation_inputs,
| 256 | +               outputs=result,
| 257 | +               fn=model.generate_animation,
| 258 | +               cache_examples=on_huggingspace,
| 259 | +               run_on_click=True,
| 260 | +               )
| 261 | +
| 262 |     return demo