jingwwu committed (verified)
Commit efe4293 · 1 Parent(s): e4b2ec9

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/1.jpg filter=lfs diff=lfs merge=lfs -text
+ assets/2.jpg filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,185 @@
+ import gradio as gr
+ import numpy as np
+ import random
+ from PIL import Image
+
+ # import spaces #[uncomment to use ZeroGPU]
+ import torch
+
+ from transformers import AutoTokenizer, AutoModel
+ from models.gen_pipeline import NextStepPipeline
+ from utils.aspect_ratio import center_crop_arr_with_buckets
+
+ HF_HUB = "stepfun-ai/NextStep-1-Large-Edit"
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ tokenizer = AutoTokenizer.from_pretrained(HF_HUB, local_files_only=False, trust_remote_code=True)
+ model = AutoModel.from_pretrained(HF_HUB, local_files_only=False, trust_remote_code=True)
+ pipeline = NextStepPipeline(tokenizer=tokenizer, model=model).to(device=device)
+
+ MAX_SEED = np.iinfo(np.int16).max
+ MAX_IMAGE_SIZE = 512
+
+ DEFAULT_POSITIVE_PROMPT = None
+ DEFAULT_NEGATIVE_PROMPT = "copy the original image"
+
+ # @spaces.GPU #[uncomment to use ZeroGPU]
+ def infer(
+ prompt=None,
+ ref=None,
+ seed=0,
+ text_cfg=7.5,
+ img_cfg=2.0,
+ num_inference_steps=30,
+ positive_prompt=DEFAULT_POSITIVE_PROMPT,
+ negative_prompt=DEFAULT_NEGATIVE_PROMPT,
+ progress=gr.Progress(track_tqdm=True),
+ ):
+ if ref is None:
+ gr.Warning("⚠️ 请上传图片!")
+ return None
+
+ if prompt in [None, ""]:
+ gr.Warning("⚠️ 请输入提示词!")
+ return None
+
+ if ref is not None:
+ editing_caption = "<image>" + prompt
+ input_image = ref
+ input_image = center_crop_arr_with_buckets(input_image, buckets=[512])
+ else:
+ editing_caption = prompt
+ input_image = None
+ img_cfg = 1.0
+
+ image = pipeline.generate_image(
+ captions=editing_caption,
+ images=input_image,
+ num_images_per_caption=2,
+ positive_prompt=positive_prompt,
+ negative_prompt=negative_prompt,
+ hw=(512, 512),
+ cfg=text_cfg,
+ cfg_img=img_cfg,
+ cfg_schedule="constant",
+ use_norm=True,
+ num_sampling_steps=num_inference_steps,
+ seed=seed,
+ progress=True,
+ )
+
+ return image[0], image[1]
+
+
+ examples = [
+ ["修改图像,让白马向镜头奔跑。", "assets/1.jpg"],
+ ["Change the background to the sea view.", "assets/2.jpg"],
+ ["Add a pirate hat to the dog's head. Change the background to a stormy sea with dark clouds. Include the text 'NextStep-Edit' in bold white letters at the top portion of the image.", "assets/3.jpg"],
+ ]
+
+ css = """
+ #col-container {
+ margin: 0 auto;
+ max-width: 800px;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+ with gr.Column(elem_id="col-container"):
+ gr.Markdown(" # NextStep-1-Large-Edit")
+
+ with gr.Row():
+ prompt = gr.Text(
+ label="Prompt",
+ show_label=False,
+ max_lines=1,
+ placeholder="Enter your prompt",
+ container=False,
+ )
+
+ run_button = gr.Button("Run", scale=0, variant="primary")
+
+ with gr.Row():
+ ref = gr.Image(label="Reference Image", show_label=True, type="pil", height=400)
+
+ with gr.Accordion("Advanced Settings", open=True):
+ positive_prompt = gr.Text(
+ label="Positive Prompt",
+ show_label=False,
+ max_lines=2,
+ placeholder="Enter your positive prompt",
+ container=False,
+ )
+ negative_prompt = gr.Text(
+ label="Negative Prompt",
+ show_label=False,
+ max_lines=2,
+ placeholder="Enter your negative prompt",
+ container=False,
+ )
+ with gr.Row():
+ seed = gr.Slider(
+ label="Seed",
+ minimum=0,
+ maximum=MAX_SEED,
+ step=1,
+ value=42,
+ )
+ num_inference_steps = gr.Slider(
+ label="# sampling steps",
+ minimum=10,
+ maximum=50,
+ step=1,
+ value=30, # Replace with defaults that work for your model
+ )
+
+ with gr.Row():
+ text_cfg = gr.Slider(
+ label="Text cfg",
+ minimum=1.0,
+ maximum=15.0,
+ step=0.1,
+ value=7.5, # Replace with defaults that work for your model
+ )
+ img_cfg = gr.Slider(
+ label="Image cfg",
+ minimum=1.0,
+ maximum=15.0,
+ step=0.1,
+ value=2.0, # Replace with defaults that work for your model
+ )
+
+ with gr.Row():
+ result_1 = gr.Image(label="Result 1", show_label=False, container=True, height=400, visible=False)
+ result_2 = gr.Image(label="Result 2", show_label=False, container=True, height=400, visible=False)
+
+ gr.Examples(examples=examples, inputs=[prompt, ref])
+
+ def show_result():
+ return gr.update(visible=True), gr.update(visible=True)
+
+ gr.on(
+ triggers=[run_button.click, prompt.submit],
+ fn=infer,
+ inputs=[
+ prompt,
+ ref,
+ seed,
+ text_cfg,
+ img_cfg,
+ num_inference_steps,
+ positive_prompt,
+ negative_prompt,
+ ],
+ outputs=[result_1, result_2],
+ )
+
+ gr.on(
+ triggers=[run_button.click, prompt.submit],
+ fn=show_result,
+ outputs=[result_1, result_2],
+ )
+
+
+ if __name__ == "__main__":
+ demo.launch()
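
The demo above simply forwards the UI values to `NextStepPipeline.generate_image`. For reference, here is a minimal non-Gradio sketch of the same call. It reuses only names that appear in this commit (`NextStepPipeline`, `center_crop_arr_with_buckets`) and mirrors the demo's default settings; treat it as an illustrative sketch, not an official usage snippet.

# Sketch: programmatic image editing mirroring app.py's infer(), assuming the
# repo modules are importable and a GPU is available.
import torch
from PIL import Image
from transformers import AutoTokenizer, AutoModel
from models.gen_pipeline import NextStepPipeline
from utils.aspect_ratio import center_crop_arr_with_buckets

HF_HUB = "stepfun-ai/NextStep-1-Large-Edit"
tokenizer = AutoTokenizer.from_pretrained(HF_HUB, trust_remote_code=True)
model = AutoModel.from_pretrained(HF_HUB, trust_remote_code=True)
pipeline = NextStepPipeline(tokenizer=tokenizer, model=model).to(
    device="cuda" if torch.cuda.is_available() else "cpu"
)

# Crop/resize the reference image into the 512 bucket, as the demo does.
ref = center_crop_arr_with_buckets(Image.open("assets/2.jpg"), buckets=[512])

images = pipeline.generate_image(
    captions="<image>" + "Change the background to the sea view.",
    images=ref,
    num_images_per_caption=2,
    negative_prompt="copy the original image",
    hw=(512, 512),
    cfg=7.5,                 # text CFG (demo default)
    cfg_img=2.0,             # image CFG (demo default)
    cfg_schedule="constant",
    use_norm=True,
    num_sampling_steps=30,
    seed=42,
)
images[0].save("edited_0.png")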
assets/1.jpg ADDED

Git LFS Details

  • SHA256: 20e489ca466f174f5bc2b74a173034cd8ec0dc1f719527fcc4b1ed55a5a20aaa
  • Pointer size: 132 Bytes
  • Size of remote file: 3.75 MB
assets/2.jpg ADDED

Git LFS Details

  • SHA256: bc365357eb5d31072cc71979236a1eb706ed95e030efd612f0b6c627ed0a9410
  • Pointer size: 132 Bytes
  • Size of remote file: 1.88 MB
assets/3.jpg ADDED
models/config.py ADDED
@@ -0,0 +1,45 @@
+ from transformers.models.llama.configuration_llama import LlamaConfig
+
+ class NextStepConfig(LlamaConfig):
+
+ model_type = "nextstep"
+
+ def __init__(
+ self,
+ vae_name_or_path: str | None = None,
+ latent_size: int = 32,
+ latent_patch_size: int = 2,
+ latent_channels: int = 16,
+ boi: int | None = None,
+ eoi: int | None = None,
+ image_placeholder_id: int | None = None,
+ pad_token_id_added: int | None = None,
+ lm_loss_weight: float = 0.01,
+ im_loss_weight: float = 1.0,
+ fm_head_dim: int = 1536,
+ fm_head_layers: int = 12,
+ fm_head_batch_mul: int = 4,
+ o_attention_bias: bool | None = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.vae_name_or_path = vae_name_or_path
+
+ self.latent_size = latent_size
+ self.latent_patch_size = latent_patch_size
+ self.latent_channels = latent_channels
+
+ self.boi = boi
+ self.eoi = eoi
+ self.image_placeholder_id = image_placeholder_id
+ self.pad_token_id_added = pad_token_id_added
+
+ self.lm_loss_weight = lm_loss_weight
+ self.im_loss_weight = im_loss_weight
+
+ self.fm_head_dim = fm_head_dim
+ self.fm_head_layers = fm_head_layers
+ self.fm_head_batch_mul = fm_head_batch_mul
+
+ self.o_attention_bias = self.attention_bias if o_attention_bias is None else o_attention_bias
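
As a quick orientation, the sketch below constructs a `NextStepConfig` directly. The values are illustrative rather than the checkpoint's actual configuration (the VAE path is hypothetical), and it only touches fields defined above; note how `o_attention_bias` falls back to the inherited `attention_bias` when left unset.

# Illustrative only: exercises the o_attention_bias fallback in NextStepConfig.
from models.config import NextStepConfig

cfg = NextStepConfig(
    vae_name_or_path="path/to/vae",   # hypothetical path, not from this commit
    latent_size=32,
    latent_patch_size=2,
    latent_channels=16,
    attention_bias=False,             # LlamaConfig field inherited via **kwargs
)
assert cfg.o_attention_bias == cfg.attention_bias  # fallback when not provided
print(cfg.model_type)                               # "nextstep"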
models/gen_pipeline.py ADDED
@@ -0,0 +1,399 @@
1
+ import re
2
+ import copy
3
+ from typing import Literal
4
+
5
+ from PIL import Image
6
+ from tqdm.auto import tqdm
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torchvision.transforms as transforms
11
+
12
+ from transformers import AutoTokenizer
13
+ from transformers.cache_utils import Cache, StaticCache
14
+
15
+ from models.nextstep_model import NextStep
16
+ from vae.nextstep_ae import AutoencoderKL
17
+ from utils.image_utils import to_pil
18
+ from utils.model_utils import layer_norm
19
+ from utils.compile_utils import compile_manager
20
+ from utils.misc import set_seed
21
+
22
+ DEFAULT_IMAGE_AREA_TOKEN = "<|image_area|>"
23
+
24
+
25
+ def hw2str(h: int, w: int) -> str:
26
+ return f"{h}*{w}"
27
+
28
+
29
+ class NextStepPipeline:
30
+ def __init__(
31
+ self,
32
+ model_name_or_path: str | None = None,
33
+ vae_name_or_path: str | None = None,
34
+ tokenizer: AutoTokenizer | None = None,
35
+ model: nn.Module | None = None,
36
+ vae: AutoencoderKL | None = None,
37
+ ):
38
+ if model is not None:
39
+ self.tokenizer = copy.deepcopy(tokenizer)
40
+ self.tokenizer.padding_side = "left"
41
+ self.model = model
42
+
43
+ elif model_name_or_path is not None:
44
+ self.tokenizer = AutoTokenizer.from_pretrained(
45
+ model_name_or_path,
46
+ local_files_only=True,
47
+ model_max_length=4096,
48
+ padding_side="left",
49
+ use_fast=True,
50
+ )
51
+ self.model: NextStep = NextStep.from_pretrained(model_name_or_path, local_files_only=True)
52
+
53
+ else:
54
+ raise ValueError("model or model_name_or_path is required")
55
+
56
+ self.tokenizer.add_eos_token = False
57
+
58
+ if vae_name_or_path is None:
59
+ vae_name_or_path = getattr(self.model.config, "vae_name_or_path", None)
60
+
61
+ if vae is not None:
62
+ self.vae = vae
63
+ elif vae_name_or_path is not None:
64
+ self.vae = AutoencoderKL.from_pretrained(vae_name_or_path)
65
+ else:
66
+ raise ValueError("vae or vae_name_or_path is required")
67
+
68
+ self.model.eval()
69
+ self.vae.eval()
70
+
71
+ vae_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
72
+ self.down_factor = vae_factor * self.model.config.latent_patch_size
73
+
74
+ self.shift_factor = getattr(self.vae.config, "shift_factor", 0.0)
75
+ self.scaling_factor = getattr(self.vae.config, "scaling_factor", 1.0)
76
+
77
+ self.boi = self.model.config.boi
78
+ self.eoi = self.model.config.eoi
79
+ self.image_placeholder_id = self.model.config.image_placeholder_id
80
+
81
+ self.pil2tensor = transforms.Compose(
82
+ [
83
+ transforms.ToTensor(),
84
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
85
+ ]
86
+ )
87
+
88
+ self.__device = self.model.device
89
+ self.__dtype = self.model.dtype
90
+
91
+ @property
92
+ def device(self):
93
+ return self.__device
94
+
95
+ @property
96
+ def device_type(self):
97
+ if isinstance(self.__device, str):
98
+ return self.__device
99
+ return self.__device.type
100
+
101
+ @property
102
+ def dtype(self):
103
+ return self.__dtype
104
+
105
+ def to(self, device: str | None = None, dtype: torch.dtype | None = None):
106
+ if device is not None:
107
+ self.__device = device
108
+ if dtype is not None:
109
+ self.__dtype = dtype
110
+ self.model.to(self.__device, dtype=self.__dtype)
111
+ self.vae.to(self.__device, dtype=self.__dtype)
112
+ return self
113
+
114
+ def _image_str(self, hw: tuple[int, int] = (256, 256)):
115
+ latent_hw = (hw[0] // self.down_factor, hw[1] // self.down_factor)
116
+ image_ids = [self.boi] + [self.image_placeholder_id] * (latent_hw[0] * latent_hw[1]) + [self.eoi]
117
+ image_str = DEFAULT_IMAGE_AREA_TOKEN + hw2str(*latent_hw) + self.tokenizer.decode(image_ids)
118
+ return image_str
119
+
120
+ def _check_input(
121
+ self, captions: str | list[str], images: Image.Image | list[Image.Image] | None
122
+ ) -> tuple[list[str], list[Image.Image] | None]:
123
+ if not isinstance(captions, list):
124
+ captions = [captions]
125
+
126
+ if images is not None:
127
+ if not isinstance(images, list):
128
+ images = [images]
129
+
130
+ # Validate image count matches <image> tokens in captions
131
+ image_token_count = 0
132
+ for caption in captions:
133
+ num_image_token = len(re.findall(r"<image>", caption))
134
+ assert num_image_token == 1, f"Caption `{caption}` has {num_image_token} image tokens, but only 1 is allowed."
135
+ image_token_count += num_image_token
136
+ if image_token_count != len(images):
137
+ raise ValueError(
138
+ f"Number of images ({len(images)}) does not match number of image tokens ({image_token_count}).\n"
139
+ f"Captions: {captions}"
140
+ )
141
+
142
+ hws = [(image.size[1], image.size[0]) for image in images]
143
+
144
+ # Replace <image> tokens sequentially with corresponding image_str based on hw
145
+ processed_captions = []
146
+ image_idx = 0
147
+ for caption in captions:
148
+ # Process each caption
149
+ processed_caption = caption
150
+ num_image_tokens = processed_caption.count("<image>")
151
+
152
+ # Replace each <image> token in order
153
+ for _ in range(num_image_tokens):
154
+ processed_caption = processed_caption.replace("<image>", self._image_str(hws[image_idx]), 1)
155
+ image_idx += 1
156
+
157
+ processed_captions.append(processed_caption)
158
+
159
+ captions = processed_captions
160
+ return captions, images
161
+
162
+ def _build_captions(
163
+ self,
164
+ captions: str | list[str],
165
+ images: list[Image.Image] | None = None,
166
+ num_images_per_caption: int = 1,
167
+ positive_prompt: str | None = None,
168
+ negative_prompt: str | None = None,
169
+ cfg: float = 1.0,
170
+ cfg_img: float = 1.0,
171
+ ):
172
+ # 1. repeat captions and images
173
+ if not isinstance(captions, list):
174
+ captions = [captions]
175
+ captions = [caption for caption in captions for _ in range(num_images_per_caption)]
176
+ if images is not None:
177
+ images = [image for image in images for _ in range(num_images_per_caption)]
178
+
179
+ # 2. add positive prompt
180
+ if positive_prompt is None:
181
+ positive_prompt = ""
182
+ captions = [f"{caption} {positive_prompt}" for caption in captions]
183
+
184
+ # 3. add negative prompt
185
+ if negative_prompt is None:
186
+ negative_prompt = ""
187
+ num_samples = len(captions)
188
+ if cfg != 1.0 and cfg_img != 1.0: # use both image and text CFG
189
+ w, h = images[0].size
190
+ captions = (
191
+ captions + [self._image_str((h, w)) + negative_prompt] * num_samples
192
+ )
193
+ images = images + images
194
+ captions = captions + [negative_prompt] * num_samples
195
+ elif cfg != 1.0 and cfg_img == 1.0: # use text CFG
196
+ captions = captions + [negative_prompt] * num_samples
197
+ elif cfg == 1.0 and cfg_img == 1.0:
198
+ pass
199
+
200
+ return captions, images
201
+
202
+ def _add_prefix_ids(self, hw: tuple[int, int], input_ids: torch.Tensor, attention_mask: torch.Tensor):
203
+ prefix_str = DEFAULT_IMAGE_AREA_TOKEN + hw2str(hw[0] // self.down_factor, hw[1] // self.down_factor)
204
+ prefix_output = self.tokenizer(prefix_str, truncation=False, add_special_tokens=True, return_tensors="pt")
205
+ prefix_input_ids = prefix_output.input_ids.to(input_ids.device, dtype=input_ids.dtype)
206
+ prefix_attention_mask = prefix_output.attention_mask.to(attention_mask.device, dtype=attention_mask.dtype)
207
+
208
+ # remove bos token
209
+ if self.tokenizer.bos_token is not None:
210
+ prefix_input_ids = prefix_input_ids[:, 1:]
211
+ prefix_attention_mask = prefix_attention_mask[:, 1:]
212
+
213
+ # add boi token
214
+ prefix_input_ids = torch.cat(
215
+ [
216
+ prefix_input_ids,
217
+ prefix_input_ids.new_tensor([self.model.config.boi]).unsqueeze(0),
218
+ ],
219
+ dim=1,
220
+ )
221
+ prefix_attention_mask = torch.cat(
222
+ [
223
+ prefix_attention_mask,
224
+ prefix_attention_mask.new_ones((prefix_attention_mask.shape[0], 1)),
225
+ ],
226
+ dim=1,
227
+ )
228
+
229
+ bsz = input_ids.shape[0]
230
+ input_ids = torch.cat([input_ids, prefix_input_ids.expand(bsz, -1)], dim=1)
231
+ attention_mask = torch.cat([attention_mask, prefix_attention_mask.expand(bsz, -1)], dim=1)
232
+
233
+ return input_ids, attention_mask
234
+
235
+ @torch.no_grad()
236
+ def decoding(
237
+ self,
238
+ c: torch.Tensor,
239
+ attention_mask: torch.Tensor,
240
+ past_key_values: Cache,
241
+ max_new_len: int,
242
+ num_images_per_caption: int,
243
+ use_norm: bool = False,
244
+ cfg: float = 1.0,
245
+ cfg_img: float = 1.0,
246
+ cfg_schedule: Literal["linear", "constant"] = "constant",
247
+ timesteps_shift: float = 1.0,
248
+ num_sampling_steps: int = 20,
249
+ progress: bool = True,
250
+ ):
251
+ indices = list(range(max_new_len))
252
+ indices = tqdm(indices, unit="tokens") if progress else indices
253
+
254
+ tokens = None
255
+ unnormed_tokens = None
256
+ for _ in indices:
257
+ # cfg schedule follow Muse
258
+ if cfg_schedule == "linear":
259
+ tokens_len = 0 if tokens is None else tokens.shape[1]
260
+ cfg_iter = max(cfg / 2, 1 + (cfg - 1) * tokens_len / max_new_len)
261
+ cfg_img_iter = max(cfg_img / 2, 1 + (cfg_img - 1) * tokens_len / max_new_len)
262
+ elif cfg_schedule == "constant":
263
+ cfg_iter = cfg
264
+ cfg_img_iter = cfg_img
265
+ else:
266
+ raise NotImplementedError
267
+
268
+ c = self.model.image_out_projector(c)
269
+ token_sampled = self.model.image_head.sample(
270
+ c=c.squeeze(1),
271
+ cfg=cfg_iter,
272
+ cfg_img=cfg_img_iter,
273
+ timesteps_shift=timesteps_shift,
274
+ num_sampling_steps=num_sampling_steps,
275
+ noise_repeat=num_images_per_caption,
276
+ )
277
+
278
+ unnormed_token_sampled = token_sampled.clone()
279
+ if use_norm:
280
+ token_sampled = layer_norm(token_sampled, normalized_shape=token_sampled.size()[1:])
281
+
282
+ if tokens is not None:
283
+ tokens = torch.cat([tokens, token_sampled.unsqueeze(1)], dim=1)
284
+ unnormed_tokens = torch.cat([unnormed_tokens, unnormed_token_sampled.unsqueeze(1)], dim=1)
285
+ else:
286
+ tokens = token_sampled.unsqueeze(1)
287
+ unnormed_tokens = unnormed_token_sampled.unsqueeze(1)
288
+
289
+ cur_inputs_embeds = self.model.image_in_projector(tokens[:, -1:])
290
+ if cfg != 1.0 and cfg_img == 1.0:
291
+ cur_inputs_embeds = torch.cat([cur_inputs_embeds, cur_inputs_embeds], dim=0)
292
+ elif cfg != 1.0 and cfg_img != 1.0:
293
+ cur_inputs_embeds = torch.cat([cur_inputs_embeds, cur_inputs_embeds, cur_inputs_embeds], dim=0)
294
+
295
+ attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
296
+ outputs = self.model.forward_model(
297
+ inputs_embeds=cur_inputs_embeds,
298
+ attention_mask=attention_mask,
299
+ past_key_values=past_key_values,
300
+ use_cache=True,
301
+ )
302
+ past_key_values = outputs.past_key_values
303
+ c = outputs.last_hidden_state[:, -1:]
304
+
305
+ return unnormed_tokens
306
+
307
+ @torch.no_grad()
308
+ def generate_image(
309
+ self,
310
+ captions: str | list[str],
311
+ images: list[Image.Image] | None = None,
312
+ num_images_per_caption: int = 1,
313
+ positive_prompt: str | None = None,
314
+ negative_prompt: str | None = None,
315
+ hw: tuple[int, int] = (256, 256),
316
+ use_norm: bool = False,
317
+ cfg: float = 1.0,
318
+ cfg_img: float = 1.0,
319
+ cfg_schedule: Literal["linear", "constant"] = "constant",
320
+ num_sampling_steps: int = 20,
321
+ timesteps_shift: float = 1.0,
322
+ seed: int = 42,
323
+ progress: bool = True,
324
+ ) -> list[Image.Image]:
325
+ # 1. check input
326
+ captions, images = self._check_input(captions, images)
327
+
328
+ # 2. build captions
329
+ captions, images = self._build_captions(
330
+ captions, images, num_images_per_caption, positive_prompt, negative_prompt, cfg, cfg_img
331
+ )
332
+
333
+ # 3. encode images
334
+ # `images` must be processed by `process_images` before calling this function
335
+ latents = None
336
+ if images is not None:
337
+ pixel_values = [self.pil2tensor(image) for image in images]
338
+ pixel_values = torch.stack(pixel_values).to(self.device)
339
+ with compile_manager.compile_disabled():
340
+ posterior = self.vae.encode(pixel_values.to(self.vae.dtype)).latent_dist
341
+ latents = (posterior.sample() - self.shift_factor) * self.scaling_factor
342
+
343
+ if seed is not None:
344
+ set_seed(seed)
345
+
346
+ # 4. tokenize caption & add prefix ids
347
+ output = self.tokenizer(captions, padding="longest", truncation=False, add_special_tokens=True, return_tensors="pt", padding_side="left")
348
+ input_ids = output.input_ids.to(self.device)
349
+ attention_mask = output.attention_mask.to(self.device)
350
+ input_ids, attention_mask = self._add_prefix_ids(hw, input_ids, attention_mask)
351
+
352
+ # 5. LLM prefill
353
+ max_new_len = (hw[0] // self.down_factor) * (hw[1] // self.down_factor)
354
+ max_cache_len = input_ids.shape[1] + max_new_len
355
+ past_key_values = StaticCache(
356
+ config=self.model.config,
357
+ max_batch_size=input_ids.shape[0],
358
+ max_cache_len=max_cache_len,
359
+ device=self.device,
360
+ dtype=self.dtype,
361
+ )
362
+ inputs_embeds = self.model.prepare_inputs_embeds(input_ids, latents)
363
+ with compile_manager.compile_disabled():
364
+ outputs = self.model.forward_model(
365
+ inputs_embeds=inputs_embeds,
366
+ attention_mask=attention_mask,
367
+ past_key_values=past_key_values,
368
+ use_cache=True,
369
+ )
370
+ past_key_values = outputs.past_key_values
371
+ c = outputs.last_hidden_state[:, -1:]
372
+
373
+ # 6. decoding
374
+ tokens = self.decoding(
375
+ c=c,
376
+ attention_mask=attention_mask,
377
+ past_key_values=past_key_values,
378
+ max_new_len=max_new_len,
379
+ num_images_per_caption=num_images_per_caption,
380
+ use_norm=use_norm,
381
+ cfg=cfg,
382
+ cfg_img=cfg_img,
383
+ cfg_schedule=cfg_schedule,
384
+ timesteps_shift=timesteps_shift,
385
+ num_sampling_steps=num_sampling_steps,
386
+ progress=progress,
387
+ )
388
+
389
+ # 7. unpatchify
390
+ latents = self.model.unpatchify(tokens)
391
+ latents = (latents / self.scaling_factor) + self.shift_factor
392
+
393
+ # 8. decode latents
394
+ with compile_manager.compile_disabled():
395
+ sampled_images = self.vae.decode(latents.to(self.vae.dtype)).sample
396
+ sampled_images = sampled_images.detach().cpu().to(torch.float32)
397
+ pil_images = [to_pil(img) for img in sampled_images]
398
+
399
+ return pil_images
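
One detail of `generate_image` worth making explicit: the autoregressive loop emits `(H // down_factor) * (W // down_factor)` image tokens, where `down_factor` is the VAE downsampling factor times `latent_patch_size`. The arithmetic below is a sketch only; it assumes a VAE with four `block_out_channels` entries (8x downsampling), which is not stated in this diff.

# Hedged arithmetic sketch, not taken from the repo's runtime values.
latent_patch_size = 2               # NextStepConfig default (models/config.py)
vae_factor = 2 ** (4 - 1)           # assumes len(vae.config.block_out_channels) == 4 -> 8
down_factor = vae_factor * latent_patch_size   # 16

h = w = 512
max_new_len = (h // down_factor) * (w // down_factor)
print(down_factor, max_new_len)     # 16, 1024 image tokens for a 512x512 generation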
models/heads.py ADDED
@@ -0,0 +1,261 @@
1
+ import math
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from torch.utils.checkpoint import checkpoint
6
+
7
+ from transformers.activations import ACT2FN
8
+
9
+ from models.config import LlamaConfig
10
+ from utils.misc import LargeInt
11
+ from utils.model_utils import expand_t, randn_tensor
12
+ from utils.compile_utils import smart_compile
13
+
14
+
15
+ class LlamaMLP(nn.Module):
16
+ def __init__(self, config: LlamaConfig):
17
+ super().__init__()
18
+ self.config = config
19
+ self.hidden_size = config.hidden_size
20
+ self.intermediate_size = config.intermediate_size
21
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
22
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
23
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
24
+ self.act_fn = ACT2FN[config.hidden_act]
25
+
26
+ def forward(self, x):
27
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
28
+ return down_proj
29
+
30
+
31
+
32
+
33
+ def modulate(x, shift, scale=None):
34
+ if shift is None:
35
+ return x * (1 + scale)
36
+ return x * (1 + scale) + shift
37
+
38
+
39
+ class ResBlock(nn.Module):
40
+ def __init__(self, channels, mlp_ratio=1.0):
41
+ super().__init__()
42
+ self.channels = channels
43
+ self.intermediate_size = int(channels * mlp_ratio)
44
+
45
+ self.in_ln = nn.LayerNorm(self.channels, eps=1e-6)
46
+ self.mlp = nn.Sequential(
47
+ nn.Linear(self.channels, self.intermediate_size),
48
+ nn.SiLU(),
49
+ nn.Linear(self.intermediate_size, self.channels),
50
+ )
51
+
52
+ self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(channels, 3 * channels, bias=True))
53
+
54
+ def forward(self, x, y):
55
+ shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(y).chunk(3, dim=-1)
56
+ h = modulate(self.in_ln(x), shift_mlp, scale_mlp)
57
+ h = self.mlp(h)
58
+ return x + gate_mlp * h
59
+
60
+
61
+ class FinalLayer(nn.Module):
62
+ def __init__(self, model_channels, out_channels):
63
+ super().__init__()
64
+ self.norm_final = nn.LayerNorm(model_channels, elementwise_affine=False, eps=1e-6)
65
+ self.linear = nn.Linear(model_channels, out_channels, bias=True)
66
+ self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(model_channels, 2 * model_channels, bias=True))
67
+
68
+ def forward(self, x, c):
69
+ shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
70
+ x = modulate(self.norm_final(x), shift, scale)
71
+ x = self.linear(x)
72
+ return x
73
+
74
+
75
+ class TimestepEmbedder(nn.Module):
76
+ """
77
+ Embeds scalar timesteps into vector representations.
78
+ """
79
+
80
+ def __init__(self, hidden_size, frequency_embedding_size=256):
81
+ super().__init__()
82
+ self.mlp = nn.Sequential(
83
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
84
+ nn.SiLU(),
85
+ nn.Linear(hidden_size, hidden_size, bias=True),
86
+ )
87
+ self.frequency_embedding_size = frequency_embedding_size
88
+
89
+ @staticmethod
90
+ def timestep_embedding(t: torch.Tensor, dim: int, max_period: float = 10000.0):
91
+ """
92
+ Create sinusoidal timestep embeddings.
93
+ :param t: a 1-D Tensor of N indices, one per batch element. These may be fractional.
94
+ :param dim: the dimension of the output.
95
+ :param max_period: controls the minimum frequency of the embeddings.
96
+ :return: an (N, D) Tensor of positional embeddings.
97
+ """
98
+ # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
99
+ half = dim // 2
100
+ freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(
101
+ device=t.device
102
+ )
103
+ args = t[:, None].float() * freqs[None]
104
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
105
+ if dim % 2:
106
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
107
+ return embedding
108
+
109
+ def forward(self, t):
110
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
111
+ t_emb = self.mlp(t_freq.to(self.mlp[0].weight.dtype))
112
+ return t_emb
113
+
114
+
115
+ class SimpleMLPAdaLN(nn.Module):
116
+ def __init__(self, input_dim, cond_dim, dim=1536, layers=12, mlp_ratio=1.0):
117
+ super().__init__()
118
+ self.input_dim = input_dim
119
+ self.cond_dim = cond_dim
120
+ self.dim = dim
121
+ self.layers = layers
122
+ self.mlp_ratio = mlp_ratio
123
+
124
+ self.time_embed = TimestepEmbedder(dim)
125
+ self.cond_embed = nn.Linear(cond_dim, dim)
126
+ self.input_proj = nn.Linear(input_dim, dim)
127
+
128
+ res_blocks = []
129
+ for _ in range(layers):
130
+ res_blocks.append(ResBlock(dim, mlp_ratio))
131
+ self.res_blocks = nn.ModuleList(res_blocks)
132
+
133
+ self.final_layer = FinalLayer(dim, input_dim)
134
+
135
+ self.grad_checkpointing = False
136
+
137
+
138
+
139
+ @smart_compile()
140
+ def forward(self, x, t, c):
141
+ """
142
+ x.shape = (bsz, input_dim)
143
+ t.shape = (bsz,)
144
+ c.shape = (bsz, cond_dim)
145
+ """
146
+
147
+ x = self.input_proj(x)
148
+ t = self.time_embed(t)
149
+ c = self.cond_embed(c)
150
+
151
+ y = t + c
152
+
153
+ for block in self.res_blocks:
154
+ if self.grad_checkpointing and self.training:
155
+ x = checkpoint(block, x, y, use_reentrant=True)
156
+ else:
157
+ x = block(x, y)
158
+
159
+ return self.final_layer(x, y)
160
+
161
+
162
+ class FlowMatchingHead(nn.Module):
163
+
164
+ def __init__(self, input_dim, cond_dim, dim=1536, layers=12, mlp_ratio=1.0):
165
+ super(FlowMatchingHead, self).__init__()
166
+ self.input_dim = input_dim
167
+ self.net = SimpleMLPAdaLN(input_dim=input_dim, cond_dim=cond_dim, dim=dim, layers=layers, mlp_ratio=mlp_ratio)
168
+
169
+ @property
170
+ def dtype(self):
171
+ return self.net.input_proj.weight.dtype
172
+
173
+ @property
174
+ def device(self):
175
+ return self.net.input_proj.weight.device
176
+
177
+ @property
178
+ def trainable_params(self) -> float:
179
+ n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
180
+ return LargeInt(n_params)
181
+
182
+
183
+ def get_score_from_velocity(self, velocity, x, t):
184
+ """Wrapper function: transform velocity prediction model to score
185
+ Args:
186
+ velocity: [bsz, ...] shaped tensor; velocity model output
187
+ x: [bsz, ...] shaped tensor; x_t data point
188
+ t: [bsz,] time tensor
189
+ """
190
+ t = expand_t(t, x)
191
+ alpha_t, d_alpha_t = t, 1
192
+ sigma_t, d_sigma_t = 1 - t, -1
193
+ mean = x
194
+ reverse_alpha_ratio = alpha_t / d_alpha_t
195
+ var = sigma_t**2 - reverse_alpha_ratio * d_sigma_t * sigma_t
196
+ score = (reverse_alpha_ratio * velocity - mean) / var
197
+ return score
198
+
199
+ def get_velocity_from_cfg(self, velocity, cfg, cfg_img, cfg_mult):
200
+ if cfg_mult == 2:
201
+ cond_v, uncond_v = torch.chunk(velocity, 2, dim=0)
202
+ velocity = uncond_v + cfg * (cond_v - uncond_v)
203
+ elif cfg_mult == 3:
204
+ cond_v, uncond_v1, uncond_v2 = torch.chunk(velocity, 3, dim=0)
205
+ velocity = uncond_v2 + cfg_img * (uncond_v1 - uncond_v2) + cfg * (cond_v - uncond_v1)
206
+ return velocity
207
+
208
+ @smart_compile(options={"triton.cudagraphs": True}, fullgraph=True)
209
+ @torch.no_grad()
210
+ def sample(
211
+ self,
212
+ c: torch.Tensor,
213
+ cfg: float = 1.0,
214
+ cfg_img: float = 1.0,
215
+ timesteps_shift: float = 1.0,
216
+ num_sampling_steps: int = 20,
217
+ last_step_size: float = 0.0,
218
+ noise_repeat: int = 1,
219
+ ):
220
+ """c.shape = (bsz, cond_dim)"""
221
+ cfg_mult = 1
222
+ if cfg > 1.0:
223
+ cfg_mult += 1
224
+ if cfg_img > 1.0:
225
+ cfg_mult += 1
226
+
227
+ device, dtype = c.device, c.dtype
228
+ noise = randn_tensor((c.shape[0] // cfg_mult, self.input_dim), noise_repeat, device, dtype)
229
+
230
+ mean_x = noise
231
+ x = noise
232
+ xs = []
233
+
234
+ t0, t1 = 0, 1
235
+ timesteps = torch.linspace(t0, t1, num_sampling_steps + 1, device=device)[:-1]
236
+ timesteps = timesteps / (timesteps_shift - (timesteps_shift - 1) * timesteps)
237
+ timesteps = torch.cat([timesteps, torch.ones(1, device=device)])
238
+ for ti, tj in zip(timesteps[:-1], timesteps[1:]):
239
+ dt = tj - ti
240
+
241
+ combined = torch.cat([x] * cfg_mult, dim=0)
242
+ velocity = self.net(combined.to(c.dtype), ti.expand(c.shape[0]).to(c), c)
243
+ velocity = velocity.to(torch.float32)
244
+
245
+ velocity = self.get_velocity_from_cfg(velocity, cfg, cfg_img, cfg_mult)
246
+ score = self.get_score_from_velocity(velocity, x, ti.expand(x.shape[0]).to(x))
247
+ drift = velocity + (1 - expand_t(ti.expand(x.shape[0]).to(x), x)) * score
248
+
249
+ w_cur = randn_tensor((c.shape[0] // cfg_mult, self.input_dim), noise_repeat, device, dtype)
250
+ dw = w_cur * torch.sqrt(dt)
251
+
252
+ mean_x = x + drift * dt
253
+ x = mean_x + torch.sqrt(2 * (1 - expand_t(ti.expand(x.shape[0]).to(x), x))) * dw
254
+ xs.append(x)
255
+
256
+
257
+ if len(xs) != num_sampling_steps:
258
+ raise ValueError(f"Samples ({len(xs)}) does not match the number of steps ({num_sampling_steps})")
259
+
260
+ return xs[-1]
261
+
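
To make the guidance batching in `FlowMatchingHead` easier to follow: `get_velocity_from_cfg` splits the batch into conditional and unconditional chunks and blends them. The sketch below reproduces that combination on dummy tensors so the layout (conditional first, then the image-only and fully unconditional chunks, matching `_build_captions` in gen_pipeline.py) is visible; shapes are illustrative.

# Sketch of the classifier-free-guidance blend used by FlowMatchingHead
# (dummy tensors; the real call happens inside FlowMatchingHead.sample).
import torch

def combine(velocity, cfg, cfg_img, cfg_mult):
    if cfg_mult == 2:                     # text CFG only
        cond_v, uncond_v = torch.chunk(velocity, 2, dim=0)
        return uncond_v + cfg * (cond_v - uncond_v)
    if cfg_mult == 3:                     # text + image CFG
        cond_v, uncond_v1, uncond_v2 = torch.chunk(velocity, 3, dim=0)
        return uncond_v2 + cfg_img * (uncond_v1 - uncond_v2) + cfg * (cond_v - uncond_v1)
    return velocity                       # no guidance

v = torch.randn(3 * 4, 16)                # batch laid out as [cond | image-only | unconditional]
guided = combine(v, cfg=7.5, cfg_img=2.0, cfg_mult=3)
print(guided.shape)                       # torch.Size([4, 16])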
models/llama_model.py ADDED
@@ -0,0 +1,568 @@
1
+ from typing import Optional, Tuple
2
+ from loguru import logger
3
+ import math
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from transformers.cache_utils import Cache, StaticCache
9
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
10
+ from transformers.utils import is_flash_attn_greater_or_equal_2_10
11
+ from transformers import ROPE_INIT_FUNCTIONS
12
+ from transformers.models.llama.configuration_llama import LlamaConfig
13
+
14
+ from models.heads import LlamaMLP
15
+ from utils.model_utils import apply_rotary_pos_emb, repeat_kv
16
+ from models.config import NextStepConfig
17
+
18
+
19
+ class LlamaRMSNorm(nn.Module):
20
+ """LlamaRMSNorm is equivalent to T5LayerNorm"""
21
+
22
+ def __init__(self, hidden_size, eps=1e-6):
23
+ super().__init__()
24
+ self.weight = nn.Parameter(torch.ones(hidden_size))
25
+ self.variance_epsilon = eps
26
+
27
+ def forward(self, hidden_states):
28
+ input_dtype = hidden_states.dtype
29
+ hidden_states = hidden_states.to(torch.float32)
30
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
31
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
32
+ return self.weight * hidden_states.to(input_dtype)
33
+
34
+ def extra_repr(self):
35
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
36
+
37
+
38
+ class LlamaRotaryEmbedding(nn.Module):
39
+ def __init__(self, device=None, config: Optional[LlamaConfig] = None):
40
+ super().__init__()
41
+ self.rope_type = "default"
42
+ self.config = config
43
+
44
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
45
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
46
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
47
+
48
+ @torch.no_grad()
49
+ def forward(self, x, position_ids):
50
+ # Core RoPE block
51
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
52
+ position_ids_expanded = position_ids[:, None, :].float()
53
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
54
+ device_type = x.device.type
55
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
56
+ with torch.autocast(device_type=device_type, enabled=False):
57
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
58
+ emb = torch.cat((freqs, freqs), dim=-1)
59
+ cos = emb.cos()
60
+ sin = emb.sin()
61
+
62
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
63
+ cos = cos * self.attention_scaling
64
+ sin = sin * self.attention_scaling
65
+
66
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
67
+
68
+
69
+ class LlamaAttention(nn.Module):
70
+ def __init__(self, config: NextStepConfig, layer_idx: Optional[int]):
71
+ super().__init__()
72
+ self.config = config
73
+ self.layer_idx = layer_idx
74
+
75
+ self.attention_dropout = config.attention_dropout
76
+ self.hidden_size = config.hidden_size
77
+ self.num_heads = config.num_attention_heads
78
+ self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
79
+ self.num_key_value_heads = config.num_key_value_heads
80
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
81
+ self.max_position_embeddings = config.max_position_embeddings
82
+ self.rope_theta = config.rope_theta
83
+ self.is_causal = True
84
+
85
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
86
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
87
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
88
+ self.o_proj = nn.Linear(
89
+ self.num_heads * self.head_dim, self.hidden_size, bias=getattr(config, "o_attention_bias", config.attention_bias)
90
+ )
91
+ self._flash_attn_uses_top_left_mask = False
92
+
93
+ def forward_sdpa(
94
+ self,
95
+ hidden_states: torch.Tensor,
96
+ attention_mask: Optional[torch.Tensor] = None,
97
+ position_ids: Optional[torch.LongTensor] = None,
98
+ past_key_value: Optional[Cache] = None,
99
+ output_attentions: bool = False,
100
+ use_cache: bool = False,
101
+ cache_position: Optional[torch.LongTensor] = None,
102
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
103
+ **kwargs,
104
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
105
+ bsz, q_len, _ = hidden_states.size()
106
+
107
+ query_states = self.q_proj(hidden_states)
108
+ key_states = self.k_proj(hidden_states)
109
+ value_states = self.v_proj(hidden_states)
110
+
111
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
112
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
113
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
114
+
115
+ if position_embeddings is None:
116
+ logger.warning_once(
117
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
118
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
119
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
120
+ "removed and `position_embeddings` will be mandatory."
121
+ )
122
+ cos, sin = self.rotary_emb(value_states, position_ids)
123
+ else:
124
+ cos, sin = position_embeddings
125
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
126
+
127
+ if past_key_value is not None:
128
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
129
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
130
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
131
+
132
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
133
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
134
+
135
+ causal_mask = attention_mask
136
+ if attention_mask is not None:
137
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
138
+
139
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
140
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
141
+ if query_states.device.type == "cuda" and causal_mask is not None:
142
+ query_states = query_states.contiguous()
143
+ key_states = key_states.contiguous()
144
+ value_states = value_states.contiguous()
145
+
146
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
147
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
148
+ is_causal = True if causal_mask is None and q_len > 1 else False
149
+
150
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
151
+ query_states,
152
+ key_states,
153
+ value_states,
154
+ attn_mask=causal_mask,
155
+ dropout_p=self.attention_dropout if self.training else 0.0,
156
+ is_causal=is_causal,
157
+ )
158
+
159
+ attn_output = attn_output.transpose(1, 2).contiguous()
160
+ attn_output = attn_output.view(bsz, q_len, -1)
161
+
162
+ attn_output = self.o_proj(attn_output)
163
+
164
+ return attn_output, None, past_key_value
165
+
166
+ def forward_flash(
167
+ self,
168
+ hidden_states: torch.Tensor,
169
+ attention_mask: Optional[torch.LongTensor] = None,
170
+ position_ids: Optional[torch.LongTensor] = None,
171
+ past_key_value: Optional[Cache] = None,
172
+ output_attentions: bool = False,
173
+ use_cache: bool = False,
174
+ cache_position: Optional[torch.LongTensor] = None,
175
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
176
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
177
+ if isinstance(past_key_value, StaticCache):
178
+ raise ValueError(
179
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
180
+ "make sure to use `sdpa` in the mean time, and open an issue at GitHub - huggingface/transformers: 🤗 Transformers: the model-definition framework for state-of-the-a"
181
+ )
182
+
183
+ output_attentions = False
184
+
185
+ bsz, q_len, _ = hidden_states.size()
186
+
187
+ query_states = self.q_proj(hidden_states)
188
+ key_states = self.k_proj(hidden_states)
189
+ value_states = self.v_proj(hidden_states)
190
+
191
+ # Flash attention requires the input to have the shape
192
+ # batch_size x seq_length x head_dim x hidden_dim
193
+ # therefore we just need to keep the original shape
194
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
195
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
196
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
197
+
198
+ if position_embeddings is None:
199
+ logger.warning_once(
200
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
201
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
202
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
203
+ "removed and `position_embeddings` will be mandatory."
204
+ )
205
+ cos, sin = self.rotary_emb(value_states, position_ids)
206
+ else:
207
+ cos, sin = position_embeddings
208
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
209
+
210
+ if past_key_value is not None:
211
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
212
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
213
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
214
+
215
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
216
+ # to be able to avoid many of these transpose/reshape/view.
217
+ query_states = query_states.transpose(1, 2)
218
+ key_states = key_states.transpose(1, 2)
219
+ value_states = value_states.transpose(1, 2)
220
+
221
+ dropout_rate = self.attention_dropout if self.training else 0.0
222
+
223
+ input_dtype = query_states.dtype
224
+ if input_dtype == torch.float32:
225
+ if torch.is_autocast_enabled():
226
+ target_dtype = torch.get_autocast_gpu_dtype()
227
+ # Handle the case where the model is quantized
228
+ elif hasattr(self.config, "_pre_quantization_dtype"):
229
+ target_dtype = self.config._pre_quantization_dtype
230
+ else:
231
+ target_dtype = self.q_proj.weight.dtype
232
+
233
+ logger.warning_once(
234
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
235
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
236
+ f" {target_dtype}."
237
+ )
238
+
239
+ query_states = query_states.to(target_dtype)
240
+ key_states = key_states.to(target_dtype)
241
+ value_states = value_states.to(target_dtype)
242
+
243
+ attn_output = _flash_attention_forward(
244
+ query_states,
245
+ key_states,
246
+ value_states,
247
+ attention_mask,
248
+ q_len,
249
+ position_ids=position_ids,
250
+ dropout=dropout_rate,
251
+ sliding_window=getattr(self, "sliding_window", None),
252
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
253
+ is_causal=self.is_causal,
254
+ )
255
+
256
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
257
+ attn_output = self.o_proj(attn_output)
258
+
259
+ if not output_attentions:
260
+ attn_weights = None
261
+
262
+ return attn_output, attn_weights, past_key_value
263
+
264
+ def forward(
265
+ self,
266
+ hidden_states: torch.Tensor,
267
+ attention_mask: Optional[torch.Tensor] = None,
268
+ position_ids: Optional[torch.LongTensor] = None,
269
+ past_key_value: Optional[Cache] = None,
270
+ output_attentions: bool = False,
271
+ use_cache: bool = False,
272
+ cache_position: Optional[torch.LongTensor] = None,
273
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
274
+ **kwargs,
275
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
276
+ bsz, q_len, _ = hidden_states.size()
277
+
278
+ query_states = self.q_proj(hidden_states)
279
+ key_states = self.k_proj(hidden_states)
280
+ value_states = self.v_proj(hidden_states)
281
+
282
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
283
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
284
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
285
+
286
+ if position_embeddings is None:
287
+ logger.warning_once(
288
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
289
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
290
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
291
+ "removed and `position_embeddings` will be mandatory."
292
+ )
293
+ cos, sin = self.rotary_emb(value_states, position_ids)
294
+ else:
295
+ cos, sin = position_embeddings
296
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
297
+
298
+ if past_key_value is not None:
299
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
300
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
301
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
302
+
303
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
304
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
305
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
306
+
307
+ if attention_mask is not None: # no matter the length, we just slice it
308
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
309
+ attn_weights = attn_weights + causal_mask
310
+
311
+ # upcast attention to fp32
312
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
313
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
314
+ attn_output = torch.matmul(attn_weights, value_states)
315
+
316
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
317
+ raise ValueError(
318
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
319
+ f" {attn_output.size()}"
320
+ )
321
+
322
+ attn_output = attn_output.transpose(1, 2).contiguous()
323
+
324
+ attn_output = attn_output.reshape(bsz, q_len, -1)
325
+
326
+ attn_output = self.o_proj(attn_output)
327
+
328
+ if not output_attentions:
329
+ attn_weights = None
330
+
331
+ return attn_output, attn_weights, past_key_value
332
+
333
+
334
+ class LlamaFlashAttention2(LlamaAttention):
335
+ """
336
+ Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays
337
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
338
+ flash attention and deal with padding tokens in case the input contains any of them.
339
+ """
340
+
341
+ def __init__(self, *args, **kwargs):
342
+ super().__init__(*args, **kwargs)
343
+
344
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
345
+
346
+ def forward(
347
+ self,
348
+ hidden_states: torch.Tensor,
349
+ attention_mask: Optional[torch.LongTensor] = None,
350
+ past_key_value: Optional[Cache] = None,
351
+ output_attentions: bool = False,
352
+ use_cache: bool = False,
353
+ cache_position: Optional[torch.LongTensor] = None,
354
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
355
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
356
+ if isinstance(past_key_value, StaticCache):
357
+ raise ValueError(
358
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
359
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
360
+ )
361
+
362
+ output_attentions = False
363
+
364
+ bsz, q_len, _ = hidden_states.size()
365
+
366
+ query_states = self.q_proj(hidden_states)
367
+ key_states = self.k_proj(hidden_states)
368
+ value_states = self.v_proj(hidden_states)
369
+
370
+ # Flash attention requires the input to have the shape
371
+ # batch_size x seq_length x head_dim x hidden_dim
372
+ # therefore we just need to keep the original shape
373
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
374
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
375
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
376
+
377
+ cos, sin = position_embeddings
378
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
379
+
380
+ if past_key_value is not None:
381
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
382
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
383
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
384
+
385
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
386
+ # to be able to avoid many of these transpose/reshape/view.
387
+ query_states = query_states.transpose(1, 2)
388
+ key_states = key_states.transpose(1, 2)
389
+ value_states = value_states.transpose(1, 2)
390
+
391
+ dropout_rate = self.attention_dropout if self.training else 0.0
392
+
393
+ input_dtype = query_states.dtype
394
+ if input_dtype == torch.float32:
395
+ if torch.is_autocast_enabled():
396
+ target_dtype = torch.get_autocast_gpu_dtype()
397
+ # Handle the case where the model is quantized
398
+ elif hasattr(self.config, "_pre_quantization_dtype"):
399
+ target_dtype = self.config._pre_quantization_dtype
400
+ else:
401
+ target_dtype = self.q_proj.weight.dtype
402
+
403
+ logger.warning_once(
404
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
405
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
406
+ f" {target_dtype}."
407
+ )
408
+
409
+ query_states = query_states.to(target_dtype)
410
+ key_states = key_states.to(target_dtype)
411
+ value_states = value_states.to(target_dtype)
412
+
413
+ attn_output = _flash_attention_forward(
414
+ query_states,
415
+ key_states,
416
+ value_states,
417
+ attention_mask,
418
+ q_len,
419
+ position_ids=None,
420
+ dropout=dropout_rate,
421
+ sliding_window=getattr(self, "sliding_window", None),
422
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
423
+ is_causal=self.is_causal,
424
+ )
425
+
426
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
427
+ attn_output = self.o_proj(attn_output)
428
+
429
+ if not output_attentions:
430
+ attn_weights = None
431
+
432
+ return attn_output, attn_weights, past_key_value
433
+
434
+
435
+ class LlamaSdpaAttention(LlamaAttention):
436
+ """
437
+ Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
438
+ `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
439
+ SDPA API.
440
+ """
441
+
442
+ # Adapted from LlamaAttention.forward
443
+ def forward(
444
+ self,
445
+ hidden_states: torch.Tensor,
446
+ attention_mask: Optional[torch.Tensor] = None,
447
+ past_key_value: Optional[Cache] = None,
448
+ output_attentions: bool = False,
449
+ use_cache: bool = False,
450
+ cache_position: Optional[torch.LongTensor] = None,
451
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
452
+ **kwargs,
453
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
454
+
455
+ bsz, q_len, _ = hidden_states.size()
456
+
457
+ query_states = self.q_proj(hidden_states)
458
+ key_states = self.k_proj(hidden_states)
459
+ value_states = self.v_proj(hidden_states)
460
+
461
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
462
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
463
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
464
+
465
+ cos, sin = position_embeddings
466
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
467
+
468
+ if past_key_value is not None:
469
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
470
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
471
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
472
+
473
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
474
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
475
+
476
+ causal_mask = attention_mask
477
+ if attention_mask is not None:
478
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
479
+
480
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
481
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
482
+ if query_states.device.type == "cuda" and causal_mask is not None:
483
+ query_states = query_states.contiguous()
484
+ key_states = key_states.contiguous()
485
+ value_states = value_states.contiguous()
486
+
487
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
488
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
489
+ is_causal = True if causal_mask is None and q_len > 1 else False
490
+
491
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
492
+ query_states,
493
+ key_states,
494
+ value_states,
495
+ attn_mask=causal_mask,
496
+ dropout_p=self.attention_dropout if self.training else 0.0,
497
+ is_causal=is_causal,
498
+ )
499
+
500
+ attn_output = attn_output.transpose(1, 2).contiguous()
501
+ attn_output = attn_output.view(bsz, q_len, -1)
502
+
503
+ attn_output = self.o_proj(attn_output)
504
+
505
+ return attn_output, None, past_key_value
506
+
507
+
508
+ LLAMA_ATTENTION_CLASSES = {
509
+ "eager": LlamaAttention,
510
+ "flash_attention_2": LlamaFlashAttention2,
511
+ "sdpa": LlamaSdpaAttention,
512
+ }
513
+
514
+
515
+ class LlamaDecoderLayer(nn.Module):
516
+ def __init__(self, config: LlamaConfig, layer_idx: int):
517
+ super().__init__()
518
+ self.hidden_size = config.hidden_size
519
+
520
+ self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
521
+
522
+ self.mlp = LlamaMLP(config)
523
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
524
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
525
+
526
+ def forward(
527
+ self,
528
+ hidden_states: torch.Tensor,
529
+ attention_mask: Optional[torch.Tensor] = None,
530
+ past_key_value: Optional[Cache] = None,
531
+ output_attentions: Optional[bool] = False,
532
+ use_cache: Optional[bool] = False,
533
+ cache_position: Optional[torch.LongTensor] = None,
534
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
535
+ **kwargs,
536
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
537
+ residual = hidden_states
538
+
539
+ hidden_states = self.input_layernorm(hidden_states)
540
+
541
+ # Self Attention
542
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
543
+ hidden_states=hidden_states,
544
+ attention_mask=attention_mask,
545
+ past_key_value=past_key_value,
546
+ output_attentions=output_attentions,
547
+ use_cache=use_cache,
548
+ cache_position=cache_position,
549
+ position_embeddings=position_embeddings,
550
+ **kwargs,
551
+ )
552
+ hidden_states = residual + hidden_states
553
+
554
+ # Fully Connected
555
+ residual = hidden_states
556
+ hidden_states = self.post_attention_layernorm(hidden_states)
557
+ hidden_states = self.mlp(hidden_states)
558
+ hidden_states = residual + hidden_states
559
+
560
+ outputs = (hidden_states,)
561
+
562
+ if output_attentions:
563
+ outputs += (self_attn_weights,)
564
+
565
+ if use_cache:
566
+ outputs += (present_key_value,)
567
+
568
+ return outputs
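
For reference, a minimal, hedged sketch (not part of this commit) of the SDPA call pattern that LlamaSdpaAttention above relies on. Tensor shapes follow the module, (bsz, num_heads, q_len, head_dim); the concrete sizes here are illustrative only.

import torch
import torch.nn.functional as F

bsz, num_heads, q_len, head_dim = 2, 4, 8, 16
q = torch.randn(bsz, num_heads, q_len, head_dim)
k = torch.randn(bsz, num_heads, q_len, head_dim)
v = torch.randn(bsz, num_heads, q_len, head_dim)

# With no explicit mask and more than one query token, the module lets SDPA apply its
# built-in causal masking (is_causal=True) instead of materialising a 4D attention mask.
causal_mask = None
is_causal = causal_mask is None and q_len > 1

out = F.scaled_dot_product_attention(q, k, v, attn_mask=causal_mask, dropout_p=0.0, is_causal=is_causal)
print(out.shape)  # torch.Size([2, 4, 8, 16])
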
models/nextstep_model.py ADDED
@@ -0,0 +1,517 @@
 
 
1
+ import os
2
+ import json
3
+ import inspect
4
+ from loguru import logger
5
+ from dataclasses import dataclass
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch.nn import CrossEntropyLoss
10
+
11
+ from safetensors.torch import safe_open
12
+ from transformers.modeling_utils import PreTrainedModel
13
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
14
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
15
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
16
+
17
+ from models.config import NextStepConfig
18
+ from models.llama_model import LlamaDecoderLayer, LlamaRMSNorm, LlamaRotaryEmbedding
19
+ from models.heads import FlowMatchingHead
20
+ from utils.misc import LargeInt
21
+ from utils.compile_utils import smart_compile
22
+
23
+
24
+ @dataclass
25
+ class NextStepOutputWithPast(CausalLMOutputWithPast):
26
+ lm_loss: torch.FloatTensor | None = None
27
+ im_loss: torch.FloatTensor | None = None
28
+
29
+
30
+ class NextStepPreTrainedModel(PreTrainedModel):
31
+ config_class = NextStepConfig
32
+ supports_gradient_checkpointing = True
33
+ _no_split_modules = ["LlamaDecoderLayer"]
34
+ _skip_keys_device_placement = ["past_key_values"]
35
+ _supports_flash_attn_2 = True
36
+ _supports_sdpa = True
37
+ _supports_cache_class = True
38
+ _supports_quantized_cache = True
39
+ _supports_static_cache = True
40
+
41
+ def _init_weights(self, module):
42
+ std = self.config.initializer_range
43
+ if isinstance(module, nn.Linear):
44
+ module.weight.data.normal_(mean=0.0, std=std)
45
+ if module.bias is not None:
46
+ module.bias.data.zero_()
47
+ elif isinstance(module, nn.Embedding):
48
+ module.weight.data.normal_(mean=0.0, std=std)
49
+ if module.padding_idx is not None:
50
+ module.weight.data[module.padding_idx].zero_()
51
+
52
+ @property
53
+ def trainable_params(self) -> float:
54
+ n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
55
+ return LargeInt(n_params)
56
+
57
+
58
+ class NextStep(NextStepPreTrainedModel):
59
+
60
+ def __init__(self, config: NextStepConfig):
61
+ super().__init__(config)
62
+ self.padding_idx = config.pad_token_id
63
+ self.vocab_size = config.vocab_size
64
+
65
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
66
+
67
+ self.layers = nn.ModuleList([LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
68
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
69
+ self.rotary_emb = LlamaRotaryEmbedding(config=config)
70
+
71
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
72
+
73
+ self.gradient_checkpointing = False
74
+
75
+
76
+ token_dim = self.config.latent_channels * self.config.latent_patch_size**2
77
+
78
+ self.image_in_projector = nn.Linear(token_dim, config.hidden_size)
79
+ self.image_in_projector.weight.data.normal_(mean=0.0, std=config.initializer_range)
80
+ self.image_in_projector.bias.data.zero_()
81
+
82
+ self.image_out_projector = nn.Linear(config.hidden_size, config.hidden_size)
83
+ self.image_out_projector.weight.data.normal_(mean=0.0, std=config.initializer_range)
84
+ self.image_out_projector.bias.data.zero_()
85
+
86
+ self.image_head = FlowMatchingHead(
87
+ input_dim=token_dim,
88
+ cond_dim=config.hidden_size,
89
+ dim=config.fm_head_dim,
90
+ layers=config.fm_head_layers,
91
+ )
92
+
93
+ def get_input_embeddings(self):
94
+ return self.embed_tokens
95
+
96
+ def set_input_embeddings(self, value):
97
+ self.embed_tokens = value
98
+
99
+ def get_output_embeddings(self):
100
+ return self.lm_head
101
+
102
+ def set_output_embeddings(self, new_embeddings):
103
+ self.lm_head = new_embeddings
104
+
105
+ def load_lm_head(self, lm_head_dir: str | None = None):
106
+ index_json_file = os.path.join(lm_head_dir, "model.safetensors.index.json")
107
+ head_weight_name = "lm_head.weight" if not self.config.tie_word_embeddings else "model.embed_tokens.weight"
108
+ if os.path.exists(index_json_file):
109
+ with open(index_json_file, "r") as f:
110
+ index = json.load(f)
111
+ model_name = index["weight_map"][head_weight_name]
112
+ else:
113
+ model_name = "model.safetensors"
114
+ with safe_open(os.path.join(lm_head_dir, model_name), framework="pt") as f:
115
+ loaded_weight = f.get_tensor(head_weight_name)
116
+ loaded_weight = loaded_weight.to(dtype=self.lm_head.weight.dtype, device=self.lm_head.weight.device)
117
+ self.lm_head.weight.data.copy_(loaded_weight)
118
+
119
+ def patchify(self, img: torch.Tensor):
120
+ """
121
+ img: (bsz, C, H, W)
122
+ x: (bsz, H * W / patch_size**2, patch_size**2 * C)
123
+ """
124
+ bsz, c, h, w = img.shape
125
+ p = self.config.latent_patch_size
126
+ h_, w_ = h // p, w // p
127
+
128
+ img = img.reshape(bsz, c, h_, p, w_, p)
129
+ img = torch.einsum("nchpwq->nhwcpq", img)
130
+ x = img.reshape(bsz, h_ * w_, c * p**2)
131
+ return x
132
+
133
+ def unpatchify(self, x: torch.Tensor, h: int = None, w: int = None):
134
+ """
135
+ x: (bsz, H * W / patch_size**2, patch_size**2 * C)
136
+ img: (bsz, C, H, W)
137
+ """
138
+ bsz = x.shape[0]
139
+ p = self.config.latent_patch_size
140
+ c = self.config.latent_channels
141
+ if h is None and w is None:
142
+ h_ = w_ = int(x.shape[1] ** 0.5)
143
+ else:
144
+ h_, w_ = h, w
145
+ assert h_ * w_ == x.shape[1], f"Invalid sequence length {x.shape[1]}."
146
+
147
+ x = x.reshape(bsz, h_, w_, c, p, p)
148
+ x = torch.einsum("nhwcpq->nchpwq", x)
149
+ img = x.reshape(bsz, c, h_ * p, w_ * p)
150
+ return img
151
+
152
+ def prepare_inputs_embeds(self, input_ids: torch.LongTensor | None = None, latents: torch.FloatTensor | None = None):
153
+ if latents is None:
154
+ if not self.training:
155
+ return self.embed_tokens(input_ids)
156
+ else: # dummy forward for the image pass, to keep gradient shapes consistent.
157
+ raise NotImplementedError("Dummy forward for image pass is not implemented.")
158
+ else:
159
+ bs, seq_length = input_ids.shape
160
+ inputs_embeds = torch.zeros(
161
+ (bs, seq_length, self.config.hidden_size),
162
+ device=self.embed_tokens.weight.device,
163
+ dtype=self.embed_tokens.weight.dtype,
164
+ )
165
+ im_indices = input_ids == self.config.image_placeholder_id
166
+ lm_indices = ~im_indices
167
+
168
+ if isinstance(latents, list):
169
+ tokens = torch.cat([self.patchify(latent) for latent in latents], dim=1)
170
+ else:
171
+ tokens = self.patchify(latents)
172
+ # tokens = tokens.reshape(1, -1, tokens.shape[-1])
173
+
174
+ image_embeds = self.image_in_projector(tokens)
175
+ image_embeds = image_embeds.view(-1, self.config.hidden_size)
176
+
177
+ token_embeds = self.embed_tokens(input_ids[lm_indices])
178
+
179
+ inputs_embeds[im_indices] = image_embeds.to(inputs_embeds.dtype)
180
+ inputs_embeds[lm_indices] = token_embeds
181
+
182
+ return inputs_embeds
183
+
184
+ def _update_causal_mask(
185
+ self,
186
+ attention_mask: torch.Tensor,
187
+ input_tensor: torch.Tensor,
188
+ cache_position: torch.Tensor,
189
+ past_key_values: Cache,
190
+ output_attentions: bool,
191
+ ):
192
+ if self.config._attn_implementation == "flash_attention_2":
193
+ if attention_mask is not None and (attention_mask == 0.0).any():
194
+ return attention_mask
195
+ return None
196
+
197
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
198
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
199
+ # to infer the attention mask.
200
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
201
+ using_static_cache = isinstance(past_key_values, StaticCache)
202
+
203
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
204
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
205
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
206
+ attention_mask,
207
+ inputs_embeds=input_tensor,
208
+ past_key_values_length=past_seen_tokens,
209
+ is_training=self.training,
210
+ ):
211
+ return None
212
+
213
+ dtype, device = input_tensor.dtype, input_tensor.device
214
+ sequence_length = input_tensor.shape[1]
215
+ if using_static_cache:
216
+ target_length = past_key_values.get_max_cache_shape()
217
+ else:
218
+ target_length = (
219
+ attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
220
+ )
221
+
222
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
223
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
224
+ attention_mask,
225
+ sequence_length=sequence_length,
226
+ target_length=target_length,
227
+ dtype=dtype,
228
+ device=device,
229
+ cache_position=cache_position,
230
+ batch_size=input_tensor.shape[0],
231
+ )
232
+
233
+ if (
234
+ self.config._attn_implementation == "sdpa"
235
+ and attention_mask is not None
236
+ and attention_mask.device.type == "cuda"
237
+ and not output_attentions
238
+ ):
239
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
240
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
241
+ # Details: https://github.com/pytorch/pytorch/issues/110213
242
+ min_dtype = torch.finfo(dtype).min
243
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
244
+
245
+ return causal_mask
246
+
247
+ @staticmethod
248
+ def _prepare_4d_causal_attention_mask_with_cache_position(
249
+ attention_mask: torch.Tensor,
250
+ sequence_length: int,
251
+ target_length: int,
252
+ dtype: torch.dtype,
253
+ device: torch.device,
254
+ cache_position: torch.Tensor,
255
+ batch_size: int,
256
+ **kwargs,
257
+ ):
258
+ """
259
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
260
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
261
+
262
+ Args:
263
+ attention_mask (`torch.Tensor`):
264
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
265
+ `(batch_size, 1, query_length, key_value_length)`.
266
+ sequence_length (`int`):
267
+ The sequence length being processed.
268
+ target_length (`int`):
269
+ The target length: when generating with static cache, the mask should be as long as the static cache,
270
+ to account for the 0 padding, the part of the cache that is not filled yet.
271
+ dtype (`torch.dtype`):
272
+ The dtype to use for the 4D attention mask.
273
+ device (`torch.device`):
274
+ The device to place the 4D attention mask on.
275
+ cache_position (`torch.Tensor`):
276
+ Indices depicting the position of the input sequence tokens in the sequence.
277
+ batch_size (`torch.Tensor`):
278
+ Batch size.
279
+ """
280
+ if attention_mask is not None and attention_mask.dim() == 4:
281
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
282
+ causal_mask = attention_mask
283
+ else:
284
+ min_dtype = torch.finfo(dtype).min
285
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
286
+ if sequence_length != 1:
287
+ causal_mask = torch.triu(causal_mask, diagonal=1)
288
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
289
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
290
+ if attention_mask is not None:
291
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
292
+ mask_length = attention_mask.shape[-1]
293
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
294
+ padding_mask = padding_mask == 0
295
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
296
+
297
+ return causal_mask
298
+
299
+ @smart_compile()
300
+ def forward_model(
301
+ self,
302
+ inputs_embeds: torch.FloatTensor | None = None,
303
+ attention_mask: torch.Tensor | None = None,
304
+ past_key_values: Cache | list[torch.FloatTensor] | None = None,
305
+ use_cache: bool | None = None,
306
+ output_attentions: bool | None = None,
307
+ output_hidden_states: bool | None = None,
308
+ cache_position: torch.LongTensor | None = None,
309
+ ) -> tuple | BaseModelOutputWithPast:
310
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
311
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
312
+
313
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
314
+ if self.gradient_checkpointing and self.training and use_cache:
315
+ use_cache = False
316
+
317
+ if use_cache and past_key_values is None:
318
+ past_key_values = DynamicCache()
319
+
320
+ if cache_position is None:
321
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
322
+ cache_position = torch.arange(
323
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
324
+ )
325
+ position_ids = cache_position.unsqueeze(0)
326
+
327
+ causal_mask = self._update_causal_mask(
328
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
329
+ )
330
+ hidden_states = inputs_embeds
331
+
332
+ # create position embeddings to be shared across the decoder layers
333
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
334
+
335
+ # decoder layers
336
+ all_hidden_states = () if output_hidden_states else None
337
+ all_self_attns = () if output_attentions else None
338
+
339
+ for decoder_layer in self.layers:
340
+ if output_hidden_states:
341
+ all_hidden_states += (hidden_states,)
342
+
343
+ if self.gradient_checkpointing and self.training:
344
+ layer_outputs = self._gradient_checkpointing_func(
345
+ decoder_layer.__call__,
346
+ hidden_states,
347
+ causal_mask,
348
+ past_key_values,
349
+ output_attentions,
350
+ use_cache,
351
+ cache_position,
352
+ position_embeddings,
353
+ )
354
+ else:
355
+ layer_outputs = decoder_layer(
356
+ hidden_states,
357
+ attention_mask=causal_mask,
358
+ past_key_value=past_key_values,
359
+ output_attentions=output_attentions,
360
+ use_cache=use_cache,
361
+ cache_position=cache_position,
362
+ position_embeddings=position_embeddings,
363
+ )
364
+
365
+ hidden_states = layer_outputs[0]
366
+
367
+ if output_attentions:
368
+ all_self_attns += (layer_outputs[1],)
369
+
370
+ hidden_states = self.norm(hidden_states)
371
+
372
+ # add hidden states from the last decoder layer
373
+ if output_hidden_states:
374
+ all_hidden_states += (hidden_states,)
375
+
376
+ return BaseModelOutputWithPast(
377
+ last_hidden_state=hidden_states,
378
+ past_key_values=past_key_values if use_cache else None,
379
+ hidden_states=all_hidden_states,
380
+ attentions=all_self_attns,
381
+ )
382
+
383
+
384
+ def prepare_inputs_for_generation(
385
+ self,
386
+ input_ids: torch.LongTensor,
387
+ past_key_values: Cache | None = None,
388
+ attention_mask: torch.LongTensor | None = None,
389
+ inputs_embeds: torch.FloatTensor | None = None,
390
+ cache_position: torch.LongTensor | None = None,
391
+ **kwargs,
392
+ ):
393
+ """
394
+ Prepare the model inputs for generation. It includes operations like computing the 4D attention mask or
395
+ slicing inputs given the existing cache.
396
+
397
+ See the forward pass in the model documentation for expected arguments (different models might have different
398
+ requirements for e.g. `past_key_values`). This function should work as is for most LLMs.
399
+ """
400
+
401
+ # 1. Handle BC:
402
+ model_inputs = {}
403
+ # - some models don't have `Cache` support (which implies they don't expect `cache_position` in `forward`)
404
+ if self._supports_cache_class:
405
+ model_inputs["cache_position"] = cache_position
406
+ # - `cache_position` was not a mandatory input in `prepare_inputs_for_generation` for those models, and this
407
+ # function may be called outside of `generate`. Handle most use cases by creating `cache_position` on the fly
408
+ # (this alternative is not as robust as calling `generate` and letting it create `cache_position`)
409
+ elif cache_position is None:
410
+ past_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
411
+ cache_position = torch.arange(past_length, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
412
+
413
+ # 2. Generic cache-dependent input preparation
414
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
415
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
416
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
417
+ # Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case
418
+ if past_key_values is not None:
419
+ model_inputs["past_key_values"] = past_key_values
420
+ if inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]: # Exception 1 or Exception 3
421
+ input_ids = input_ids[:, -cache_position.shape[0] :]
422
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
423
+ input_ids = input_ids[:, cache_position]
424
+
425
+ # 3. Prepare base model inputs
426
+ input_ids_key = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
427
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
428
+ if not self.config.is_encoder_decoder:
429
+ if inputs_embeds is not None and cache_position[0] == 0:
430
+ model_inputs[input_ids_key] = None
431
+ model_inputs["inputs_embeds"] = inputs_embeds
432
+ else:
433
+ # `clone` calls in this function ensure a consistent stride. See #32227
434
+ model_inputs[input_ids_key] = input_ids.clone(memory_format=torch.contiguous_format)
435
+ model_inputs["inputs_embeds"] = None
436
+ else:
437
+ model_inputs[input_ids_key] = input_ids.clone(memory_format=torch.contiguous_format)
438
+
439
+ # 4. Create missing `position_ids` on the fly
440
+ if (
441
+ attention_mask is not None
442
+ and kwargs.get("position_ids") is None
443
+ and "position_ids" in set(inspect.signature(self.forward).parameters.keys())
444
+ ):
445
+ position_ids = attention_mask.long().cumsum(-1) - 1
446
+ position_ids.masked_fill_(attention_mask == 0, 1)
447
+ kwargs["position_ids"] = position_ids # placed in kwargs for further processing (see below)
448
+
449
+ # 5. Slice model inputs if it's an input that should have the same length as `input_ids`
450
+ for model_input_name in ["position_ids", "token_type_ids"]:
451
+ model_input = kwargs.get(model_input_name)
452
+ if model_input is not None:
453
+ if past_key_values:
454
+ model_input = model_input[:, -input_ids.shape[1] :]
455
+ model_input = model_input.clone(memory_format=torch.contiguous_format)
456
+ model_inputs[model_input_name] = model_input
457
+
458
+ # 6. Create 4D attention mask if we are using a `StaticCache` (important for performant compiled forward pass)
459
+ if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
460
+ if model_inputs["inputs_embeds"] is not None:
461
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
462
+ device = model_inputs["inputs_embeds"].device
463
+ else:
464
+ batch_size, sequence_length = model_inputs[input_ids_key].shape
465
+ device = model_inputs[input_ids_key].device
466
+
467
+ # Create the causal mask with fixed shape in advance, to reduce recompilations. If the function to create
468
+ # the 4D causal mask exists, it should be present in the base model (XXXModel class).
469
+ base_model = getattr(self, self.base_model_prefix, None)
470
+ if base_model is None:
471
+ causal_mask_creation_function = getattr(self, "_prepare_4d_causal_attention_mask_with_cache_position", None)
472
+ else:
473
+ causal_mask_creation_function = getattr(
474
+ base_model, "_prepare_4d_causal_attention_mask_with_cache_position", None
475
+ )
476
+ if causal_mask_creation_function is None:
477
+ logger.warning(
478
+ f"{self.__class__.__name__} has no `_prepare_4d_causal_attention_mask_with_cache_position` method "
479
+ "defined in its base modeling class. Compiled forward passes will be sub-optimal. If you're "
480
+ "writing code, see Llama for an example implementation. If you're a user, please report this "
481
+ "issue on GitHub."
482
+ )
483
+ else:
484
+ attention_mask = causal_mask_creation_function(
485
+ attention_mask,
486
+ sequence_length=sequence_length,
487
+ target_length=past_key_values.get_max_cache_shape(),
488
+ dtype=self.dtype,
489
+ device=device,
490
+ cache_position=cache_position,
491
+ batch_size=batch_size,
492
+ config=self.config,
493
+ past_key_values=past_key_values,
494
+ )
495
+ if attention_mask is not None:
496
+ model_inputs["attention_mask"] = attention_mask
497
+
498
+ # 7. Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
499
+ for key, value in kwargs.items():
500
+ if key not in model_inputs:
501
+ model_inputs[key] = value
502
+
503
+ # 8. Remove unexpected `generate` inputs (TODO @joao: fix trainer and examples)
504
+ model_inputs.pop("labels", None)
505
+ return model_inputs
506
+
507
+ @torch.no_grad()
508
+ def generate(self, inputs: torch.LongTensor = None, **kwargs):
509
+ input_ids = kwargs.pop("input_ids")
510
+ latents = kwargs.pop("latents", None)
511
+ inputs_embeds = self.prepare_inputs_embeds(input_ids, latents)
512
+ return super().generate(inputs=inputs, input_ids=input_ids, inputs_embeds=inputs_embeds, **kwargs)
513
+
514
+ def gradient_checkpointing_enable(self, **kwargs):
515
+ super().gradient_checkpointing_enable(**kwargs)
516
+
517
+ self.image_head.net.grad_checkpointing = True
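
As a quick illustration of the patchify/unpatchify pair defined above, here is a self-contained, hedged sketch of the same reshaping with hypothetical values (latent_patch_size=2 and latent_channels=16 are assumptions, not read from NextStepConfig):

import torch

p, c, h, w, bsz = 2, 16, 32, 32, 1   # hypothetical patch size, channels, latent H/W
img = torch.randn(bsz, c, h, w)

# patchify: (bsz, C, H, W) -> (bsz, H*W/p**2, p**2*C)
x = img.reshape(bsz, c, h // p, p, w // p, p)
x = torch.einsum("nchpwq->nhwcpq", x).reshape(bsz, (h // p) * (w // p), c * p * p)

# unpatchify: invert the reshaping back to (bsz, C, H, W)
y = x.reshape(bsz, h // p, w // p, c, p, p)
y = torch.einsum("nhwcpq->nchpwq", y).reshape(bsz, c, h, w)

assert torch.equal(img, y)   # the round trip is lossless
print(x.shape)               # torch.Size([1, 256, 64])
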
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
1
+ diffusers==0.34.0
2
+ einops==0.8.1
3
+ gradio==5.42.0
4
+ loguru==0.7.3
5
+ numpy==2.3.2
6
+ omegaconf==2.3.0
7
+ Pillow==11.3.0
8
+ Requests==2.32.4
9
+ safetensors==0.6.2
10
+ tabulate==0.9.0
11
+ torch==2.5.1
12
+ torchvision==0.20.1
13
+ tqdm==4.67.1
14
+ transformers==4.55.0
utils/aspect_ratio.py ADDED
@@ -0,0 +1,107 @@
 
 
1
+ import numpy as np
2
+ import PIL.Image
3
+
4
+ ANY_ASPECT_RATIO = (0, 0)
5
+
6
+ HW_ASPECT_RATIOS = [
7
+ (8, 32), # 256
8
+ (9, 28), # 252
9
+ (10, 25), # 250
10
+ (11, 23), # 253
11
+ (12, 21), # 252
12
+ (13, 19), # 247
13
+ (14, 18), # 252
14
+ (15, 17), # 255
15
+ (16, 16), # 256
16
+ (17, 15), # 255
17
+ (18, 14), # 252
18
+ (19, 13), # 247
19
+ (21, 12), # 252
20
+ (23, 11), # 253
21
+ (25, 10), # 250
22
+ (28, 9), # 252
23
+ (32, 8), # 256
24
+ ]
25
+
26
+
27
+ def get_ar_base(ars: list[tuple[int, int]] = HW_ASPECT_RATIOS):
28
+ sqrt_products = [round(np.sqrt(h * w)) for h, w in ars]
29
+ return round(np.mean(sqrt_products))
30
+
31
+
32
+ def ar2str(h: int, w: int) -> str:
33
+ return f"{h}*{w}"
34
+
35
+
36
+ def str2ar(s: str) -> tuple[int, int]:
37
+ return tuple(map(int, s.split("*")))
38
+
39
+ def center_crop_arr_with_buckets(pil_image, ars: list[tuple[int, int]] = HW_ASPECT_RATIOS, crop=True, buckets: list[int] = [256, 512, 768, 1024]):
40
+ """
41
+ Center crop the image to match the closest aspect ratio from the provided list.
42
+
43
+ Args:
44
+ pil_image: PIL Image to be cropped
45
+ buckets: Candidate base resolutions; the largest bucket whose squared area fits the input image is used
46
+ ars: List of aspect ratios as (height, width) tuples
47
+
48
+ Returns:
49
+ PIL Image cropped to the closest aspect ratio
50
+ """
51
+ # ar_base = get_ar_base(ars)
52
+ # Get current image dimensions
53
+ width, height = pil_image.size
54
+
55
+ buckets = sorted(buckets, reverse=True)
56
+ image_size = buckets[-1]
57
+
58
+ for bucket in buckets:
59
+ if width * height >= bucket * bucket:
60
+ image_size = bucket
61
+ break
62
+
63
+ return center_crop_arr_with_ar(pil_image, image_size, ars, crop)
64
+
65
+ def center_crop_arr_with_ar(pil_image, image_size: int, ars: list[tuple[int, int]] = HW_ASPECT_RATIOS, crop=True):
66
+ """
67
+ Center crop the image to match the closest aspect ratio from the provided list.
68
+
69
+ Args:
70
+ pil_image: PIL Image to be cropped
71
+ image_size: Target size for the smaller dimension
72
+ ars: List of aspect ratios as (height, width) tuples
73
+
74
+ Returns:
75
+ PIL Image cropped to the closest aspect ratio
76
+ """
77
+
78
+ ar_base = get_ar_base(ars)
79
+ assert image_size % ar_base == 0, f"image_size must be divisible by {ar_base}"
80
+
81
+ # Get current image dimensions
82
+ width, height = pil_image.size
83
+
84
+ current_ar = height / width
85
+
86
+ # Find the closest aspect ratio
87
+ closest_ar_idx = np.argmin([abs(current_ar - (h / w)) for h, w in ars])
88
+ target_h, target_w = ars[closest_ar_idx]
89
+
90
+ if crop:
91
+ target_h, target_w = round(image_size / ar_base * target_h), round(image_size / ar_base * target_w)
92
+
93
+ # First, resize the image while maintaining aspect ratio to ensure the smaller dimension is at least the target size
94
+ scale = max(target_h / height, target_w / width)
95
+ new_height = round(height * scale)
96
+ new_width = round(width * scale)
97
+ pil_image = pil_image.resize((new_width, new_height), resample=PIL.Image.LANCZOS)
98
+
99
+ arr = np.array(pil_image)
100
+ # Then perform center crop to the target dimensions
101
+ crop_y = (new_height - target_h) // 2
102
+ crop_x = (new_width - target_w) // 2
103
+
104
+ return PIL.Image.fromarray(arr[crop_y : crop_y + target_h, crop_x : crop_x + target_w])
105
+ else:
106
+ scale = image_size // ar_base
107
+ return pil_image.resize((round(target_w * scale), round(target_h * scale)), resample=PIL.Image.LANCZOS)
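
A hedged usage sketch of the bucket logic above (it assumes this module is importable as utils.aspect_ratio and uses a synthetic image instead of the repo's assets): the largest bucket whose squared area fits the input is chosen, then the image is resized and center-cropped to the closest listed aspect ratio.

import PIL.Image
from utils.aspect_ratio import center_crop_arr_with_buckets

img = PIL.Image.new("RGB", (800, 600), "white")          # 480,000 px: >= 512*512 but < 768*768
out = center_crop_arr_with_buckets(img, buckets=[256, 512, 768, 1024])
print(out.size)  # (576, 448): bucket 512 combined with the closest (14, 18) h:w ratio
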
utils/compile_utils.py ADDED
@@ -0,0 +1,122 @@
 
 
1
+ import contextlib
2
+ import functools
3
+ import os
4
+ from typing import Callable, Dict, Optional
5
+
6
+ import torch
7
+
8
+ from loguru import logger
9
+
10
+ """
11
+ Usage:
12
+
13
+ 1. Control through environment variable (at startup):
14
+ export ENABLE_TORCH_COMPILE=true
15
+ python your_script.py
16
+
17
+ 2. Control through environment variable (disable):
18
+ export ENABLE_TORCH_COMPILE=false # or not set
19
+ python your_script.py
20
+
21
+ 3. Dynamically control in code:
22
+ compile_manager.set_compile_enabled(True) # enable
23
+ compile_manager.set_compile_enabled(False) # disable
24
+
25
+ 4. Select version at runtime:
26
+ # use the version configured
27
+ result = my_function(args)
28
+
29
+ # force use the original version
30
+ result = my_function.original(args)
31
+
32
+ # force use the compiled version
33
+ result = my_function.compiled(args)
34
+ """
35
+
36
+ # Global configuration: control whether to enable compile through environment variables
37
+ # Compilation is disabled by default; set ENABLE_TORCH_COMPILE=true to turn it on
38
+ ENABLE_TORCH_COMPILE = os.getenv("ENABLE_TORCH_COMPILE", "false").lower() == "true"
39
+
40
+
41
+ class CompileManager:
42
+ """Global controller for torch.compile"""
43
+
44
+ def __init__(self):
45
+ self.compile_enabled = ENABLE_TORCH_COMPILE
46
+ self.compiled_functions: Dict[str, Callable] = {}
47
+ self.original_functions: Dict[str, Callable] = {}
48
+
49
+ def set_compile_enabled(self, enabled: bool):
50
+ """Dynamic setting of whether to enable compile"""
51
+ self.compile_enabled = enabled
52
+
53
+ def get_compile_status(self):
54
+ """Get the current compile status"""
55
+ return self.compile_enabled
56
+
57
+ @contextlib.contextmanager
58
+ def compile_disabled(self):
59
+ """Temporarily disable compile within the context"""
60
+ original_status = self.compile_enabled
61
+ try:
62
+ self.compile_enabled = False
63
+ yield
64
+ finally:
65
+ self.compile_enabled = original_status
66
+
67
+
68
+ # global instance
69
+ compile_manager = CompileManager()
70
+
71
+
72
+ def smart_compile(func: Optional[Callable] = None, **compile_kwargs):
73
+ """
74
+ Smart compile decorator
75
+
76
+ Args:
77
+ func: The function to decorate
78
+ **compile_kwargs: Other compile parameters, see https://pytorch.org/docs/stable/generated/torch.compile.html
79
+ """
80
+
81
+ def decorator(fn: Callable) -> Callable:
82
+ # save the original function
83
+ original_func = fn
84
+ # Use qualified name to handle functions with same name in different classes
85
+ # Include module name to handle functions with same name in different files
86
+ func_name = f"{fn.__module__}.{fn.__qualname__}"
87
+ compile_manager.original_functions[func_name] = original_func
88
+
89
+ # if compile is disabled, return the original function
90
+ if not compile_manager.compile_enabled:
91
+ # add attributes to the original function for later access
92
+ original_func.original = original_func
93
+ original_func.compiled = original_func # point to itself
94
+ return original_func
95
+
96
+ # create the compiled function
97
+ try:
98
+ compiled_func = torch.compile(original_func, **compile_kwargs)
99
+ compile_manager.compiled_functions[func_name] = compiled_func
100
+ except Exception as e:
101
+ logger.warning(f"[WARNING] Failed to compile function {func_name}: {e}")
102
+ # if compile fails, revert to the original function
103
+ compiled_func = original_func
104
+
105
+ @functools.wraps(original_func)
106
+ def wrapper(*args, **kwargs):
107
+ # check whether to use the compiled version at runtime
108
+ if compile_manager.compile_enabled:
109
+ return compiled_func(*args, **kwargs)
110
+ else:
111
+ return original_func(*args, **kwargs)
112
+
113
+ # add attributes to the wrapper for later access
114
+ wrapper.original = original_func
115
+ wrapper.compiled = compiled_func
116
+
117
+ return wrapper
118
+
119
+ # support direct use of @smart_compile or @smart_compile(...)
120
+ if func is not None:
121
+ return decorator(func)
122
+ return decorator
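
A short, hedged usage sketch of the decorator above; scaled_residual is a made-up example function, and compilation only actually happens when ENABLE_TORCH_COMPILE=true is set (or compile_manager.set_compile_enabled(True) is called before decoration).

import torch
from utils.compile_utils import smart_compile, compile_manager

@smart_compile()
def scaled_residual(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return x + 0.5 * y

x, y = torch.randn(4, 8), torch.randn(4, 8)
out = scaled_residual(x, y)                  # compiled version only if compilation is enabled
out_eager = scaled_residual.original(x, y)   # always the plain Python version
with compile_manager.compile_disabled():     # temporarily force the eager path
    out_eager2 = scaled_residual(x, y)
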
utils/image_utils.py ADDED
@@ -0,0 +1,314 @@
 
 
1
+ import io
2
+ import os
3
+ from typing import Literal, TypeAlias
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import PIL.ImageOps
8
+ import requests
9
+ import torch
10
+
11
+ """
12
+ - pil: `PIL.Image.Image`, size (w, h), stored as `uint8`
13
+ - np: `np.ndarray`, shape (h, w, c), default `np.uint8`
14
+ - pt: `torch.Tensor`, shape (c, h, w), default `torch.uint8`
15
+ """
16
+ ImageType: TypeAlias = PIL.Image.Image | np.ndarray | torch.Tensor
17
+ ImageTypeStr: TypeAlias = Literal["pil", "np", "pt"]
18
+ ImageFormat: TypeAlias = Literal["JPEG", "PNG"]
19
+ DataFormat: TypeAlias = Literal["255", "01", "11"]
20
+
21
+
22
+ IMG_SUPPORT_MODE = ["L", "LA", "RGB", "RGBA", "CMYK", "P", "1"]
23
+ IMAGE_EXT_LOWER = ["png", "jpeg", "jpg", "webp"]
24
+ IMAGE_EXT = IMAGE_EXT_LOWER + [_ext.upper() for _ext in IMAGE_EXT_LOWER]
25
+
26
+
27
+ def check_image_type(image: ImageType):
28
+ if not (isinstance(image, PIL.Image.Image) or isinstance(image, np.ndarray) or isinstance(image, torch.Tensor)):
29
+ raise TypeError(f"`image` should be PIL Image, ndarray or Tensor. Got `{type(image)}`.")
30
+
31
+
32
+ def to_rgb(image: PIL.Image.Image) -> PIL.Image.Image:
33
+ # Automatically adjust the orientation of the image to match the direction it was taken.
34
+ image = PIL.ImageOps.exif_transpose(image)
35
+
36
+ if image.mode not in IMG_SUPPORT_MODE:
37
+ raise ValueError(f"Only support mode in `{IMG_SUPPORT_MODE}`, got `{image.mode}`")
38
+
39
+ if image.mode == "LA":
40
+ image = image.convert("RGBA")
41
+
42
+ # add white background for RGBA images, and convert to RGB
43
+ if image.mode == "RGBA":
44
+ background = PIL.Image.new("RGBA", image.size, "white")
45
+ image = PIL.Image.alpha_composite(background, image).convert("RGB")
46
+
47
+ # then convert to RGB
48
+ image = image.convert("RGB")
49
+
50
+ return image
51
+
52
+
53
+ def load_image(
54
+ image: str | os.PathLike | PIL.Image.Image | bytes,
55
+ *,
56
+ output_type: ImageTypeStr = "pil",
57
+ ) -> ImageType:
58
+ """
59
+ Loads `image` to a PIL Image, NumPy array or PyTorch tensor.
60
+
61
+ Args:
62
+ image (str | PIL.Image.Image): The path to image or PIL Image.
63
+ The image is EXIF-transposed and converted to RGB before it is returned; supported source
64
+ modes are "L", "LA", "RGB", "RGBA", "CMYK", "P" and "1".
65
+ output_type (ImageTypeStr, optional): The type of the output image. Defaults to "pil".
66
+ The current version supports "pil", "np", "pt".
67
+
68
+ Returns:
69
+ ImageType: The loaded image in the given type.
70
+ """
71
+ timeout = 10
72
+ # Load the `image` into a PIL Image.
73
+ if isinstance(image, str) or isinstance(image, os.PathLike):
74
+ if image.startswith("http://") or image.startswith("https://"):
75
+ try:
76
+ image = PIL.Image.open(requests.get(image, stream=True, timeout=timeout).raw)
77
+ except requests.exceptions.Timeout:
78
+ raise ValueError(f"HTTP request timed out after {timeout} seconds")
79
+ elif os.path.isfile(image):
80
+ image = PIL.Image.open(image)
81
+ else:
82
+ raise ValueError(
83
+ f"Incorrect path or url, URLs must start with `http://`, `https://` or `s3+[profile]://`, and `{image}` is not a valid path."
84
+ )
85
+ elif isinstance(image, PIL.Image.Image):
86
+ image = image
87
+ elif isinstance(image, bytes):
88
+ image = PIL.Image.open(io.BytesIO(image))
89
+ else:
90
+ raise ValueError(f"`image` must be a path or PIL Image, got `{type(image)}`")
91
+
92
+ image = to_rgb(image)
93
+
94
+ if output_type == "pil":
95
+ image = image
96
+ elif output_type == "np":
97
+ image = to_np(image)
98
+ elif output_type == "pt":
99
+ image = to_pt(image)
100
+ else:
101
+ raise ValueError(f"`output_type` must be one of `{ImageTypeStr}`, got `{output_type}`")
102
+
103
+ return image
104
+
105
+
106
+ def to_pil(image: ImageType, image_mode: DataFormat | None = None) -> PIL.Image.Image:
107
+ """
108
+ Convert a NumPy array or a PyTorch tensor to a PIL image.
109
+ """
110
+ check_image_type(image)
111
+
112
+ if isinstance(image, PIL.Image.Image):
113
+ return image
114
+
115
+ elif isinstance(image, np.ndarray):
116
+ image = normalize_np(image, image_mode)
117
+
118
+ elif isinstance(image, torch.Tensor):
119
+ image = normalize_pt(image, image_mode)
120
+
121
+ image = image.cpu().permute(1, 2, 0).numpy()
122
+ assert image.dtype == np.uint8, f"Supposed to convert `torch.uint8` to `np.uint8`, but got `{image.dtype}`"
123
+
124
+ mode_map = {1: "L", 3: "RGB"}
125
+ mode = mode_map[image.shape[-1]]
126
+
127
+ if image.shape[-1] == 1:
128
+ image = image[:, :, 0]
129
+
130
+ return PIL.Image.fromarray(image, mode=mode)
131
+
132
+
133
+ def to_np(image: ImageType, image_mode: DataFormat | None = None) -> np.ndarray:
134
+ """
135
+ Convert a PIL image or a PyTorch tensor to a NumPy array.
136
+ """
137
+ check_image_type(image)
138
+
139
+ if isinstance(image, PIL.Image.Image):
140
+ image = np.array(image, np.uint8, copy=True)
141
+
142
+ if isinstance(image, np.ndarray):
143
+ image = normalize_np(image, image_mode)
144
+
145
+ elif isinstance(image, torch.Tensor):
146
+ image = normalize_pt(image, image_mode)
147
+
148
+ image = image.cpu().permute(1, 2, 0).numpy()
149
+ assert image.dtype == np.uint8, f"Supposed to convert `torch.uint8` to `np.uint8`, but got `{image.dtype}`"
150
+
151
+ return image
152
+
153
+
154
+ def to_pt(image: ImageType, image_mode: DataFormat | None = None) -> torch.Tensor:
155
+ """
156
+ Convert a PIL image or a NumPy array to a PyTorch tensor.
157
+ """
158
+ check_image_type(image)
159
+
160
+ if isinstance(image, torch.Tensor):
161
+ image = normalize_pt(image, image_mode)
162
+ return image
163
+
164
+ # convert PIL Image to NumPy array
165
+ if isinstance(image, PIL.Image.Image):
166
+ image = np.array(image, np.uint8, copy=True)
167
+
168
+ image = normalize_np(image, image_mode)
169
+
170
+ image = torch.from_numpy(image.transpose((2, 0, 1))).contiguous()
171
+ assert image.dtype == torch.uint8, f"Supposed to convert `np.uint8` to `torch.uint8`, but got `{image.dtype}`"
172
+ return image
173
+
174
+
175
+ def normalize_np(image: np.ndarray, image_mode: DataFormat | None = None) -> np.ndarray:
176
+ """
177
+ Normalize a NumPy array to the standard format of shape (h, w, c) and uint8.
178
+ """
179
+ if image.ndim not in {2, 3}:
180
+ raise ValueError(f"`image` should be 2 or 3 dimensions. Got {image.ndim} dimensions.")
181
+
182
+ elif image.ndim == 2:
183
+ # if 2D image, add channel dimension (HWC)
184
+ image = np.expand_dims(image, 2)
185
+
186
+ if image.shape[-1] not in {1, 3}:
187
+ raise ValueError(f"`image` should have 1 (`L`) or 3 (`RGB`) channels. Got {image.shape[-1]} channels.")
188
+
189
+ image = to_dataformat(image, image_mode=image_mode, mode="255")
190
+
191
+ return image
192
+
193
+
194
+ def normalize_pt(image: torch.Tensor, image_mode: DataFormat | None = None) -> torch.Tensor:
195
+ """
196
+ Normalize a PyTorch tensor to the standard format of shape (c, h, w) and uint8.
197
+ """
198
+ if image.ndimension() not in {2, 3}:
199
+ raise ValueError(f"`image` should be 2 or 3 dimensions. Got {image.ndimension()} dimensions.")
200
+
201
+ elif image.ndimension() == 2:
202
+ # if 2D image, add channel dimension (CHW)
203
+ image = image.unsqueeze(0)
204
+
205
+ # check number of channels
206
+ if image.shape[-3] not in {1, 3}:
207
+ raise ValueError(f"`image` should have 1 (`L`) or 3 (`RGB`) channels. Got {image.shape[-3]} channels.")
208
+
209
+ image = to_dataformat(image, image_mode=image_mode, mode="255")
210
+
211
+ return image
212
+
213
+
214
+ def to_dataformat(
215
+ image: ImageType,
216
+ *,
217
+ image_mode: DataFormat | None = None,
218
+ mode: DataFormat = "255",
219
+ ) -> np.ndarray | torch.Tensor:
220
+ check_image_type(image)
221
+
222
+ # convert PIL Image to NumPy array
223
+ if isinstance(image, PIL.Image.Image):
224
+ image = np.array(image, np.uint8, copy=True)
225
+ image_mode = "255"
226
+
227
+ # guess image mode
228
+ if image.dtype == np.uint8 or image.dtype == torch.uint8:
229
+ guess_image_mode = "255"
230
+ elif image.dtype == np.float32 or image.dtype == np.float16 or image.dtype == torch.float32 or image.dtype == torch.float16:
231
+ if image.min() < 0.0:
232
+ guess_image_mode = "11"
233
+ else:
234
+ guess_image_mode = "01"
235
+ else:
236
+ raise ValueError(f"Unsupported dtype `{image.dtype}`")
237
+
238
+ if image_mode is None:
239
+ image_mode = guess_image_mode
240
+ else:
241
+ if guess_image_mode != image_mode:
242
+ print(f"Guess image mode is `{guess_image_mode}`, but image mode is `{image_mode}`")
243
+
244
+ if isinstance(image, np.ndarray):
245
+ if image_mode == "255" and mode != "255":
246
+ image = np.clip(image.astype(np.float32) / 255, 0, 1)
247
+ if mode == "11":
248
+ np.clip((image * 2 - 1), -1, 1, out=image)
249
+
250
+ elif image_mode == "01" and mode != "01":
251
+ if mode == "255":
252
+ np.clip(image, 0, 1, out=image)
253
+ image = (image * 255).round().astype(np.uint8)
254
+ elif mode == "11":
255
+ np.clip((image * 2 - 1), -1, 1, out=image)
256
+
257
+ elif image_mode == "11" and mode != "11":
258
+ np.clip((image / 2 + 0.5), 0, 1, out=image)
259
+ if mode == "255":
260
+ image = (image * 255).round().astype(np.uint8)
261
+
262
+ elif isinstance(image, torch.Tensor):
263
+ if image_mode == "255" and mode != "255":
264
+ image = image.to(dtype=torch.float32).div(255).clamp(0, 1)
265
+ if mode == "11":
266
+ image = (image * 2 - 1).clamp(-1, 1)
267
+
268
+ elif image_mode == "01" and mode != "01":
269
+ if mode == "255":
270
+ image = image.clamp(0, 1)
271
+ image = (image * 255).round().to(dtype=torch.uint8)
272
+ elif mode == "11":
273
+ image = (image * 2 - 1).clamp(-1, 1)
274
+
275
+ elif image_mode == "11" and mode != "11":
276
+ image = (image / 2 + 0.5).clamp(0, 1)
277
+ if mode == "255":
278
+ image = image.mul(255).round().to(dtype=torch.uint8)
279
+
280
+ return image
281
+
282
+
283
+ def resize_image(pil_image, image_size):
284
+ while min(*pil_image.size) >= 2 * image_size:
285
+ pil_image = pil_image.resize(tuple(x // 2 for x in pil_image.size), resample=PIL.Image.BOX)
286
+
287
+ scale = image_size / min(*pil_image.size)
288
+ pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=PIL.Image.BICUBIC)
289
+ return pil_image
290
+
291
+
292
+ def center_crop_arr(pil_image, image_size, crop=True):
293
+ """
294
+ Center cropping implementation from ADM.
295
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
296
+ """
297
+ if crop:
298
+ pil_image = resize_image(pil_image, image_size)
299
+ arr = np.array(pil_image)
300
+ crop_y = (arr.shape[0] - image_size) // 2
301
+ crop_x = (arr.shape[1] - image_size) // 2
302
+ return PIL.Image.fromarray(arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size])
303
+ else:
304
+ # Pad the image to a square
305
+ width, height = pil_image.size
306
+ if width != height:
307
+ # Create a square canvas whose side equals the larger dimension
308
+ max_dim = max(width, height)
309
+ padded_img = PIL.Image.new(pil_image.mode, (max_dim, max_dim), (0, 0, 0))
310
+ # Paste the original image centered on the square canvas
311
+ padded_img.paste(pil_image, ((max_dim - width) // 2, (max_dim - height) // 2))
312
+ pil_image = padded_img
313
+ pil_image = resize_image(pil_image, image_size)
314
+ return pil_image
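
A hedged round-trip sketch of the converters above, using a synthetic array rather than a real file (load_image covers the path/URL/bytes cases):

import numpy as np
from utils.image_utils import to_pil, to_pt, to_np

arr = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # HWC uint8, the "255" data format

pil_img = to_pil(arr)     # PIL.Image.Image, mode "RGB", size (64, 64)
pt_img = to_pt(pil_img)   # torch.uint8 tensor, shape (3, 64, 64)
np_img = to_np(pt_img)    # back to np.uint8, shape (64, 64, 3)

assert np.array_equal(arr, np_img)  # uint8 round trip is exact
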
utils/misc.py ADDED
@@ -0,0 +1,51 @@
 
 
1
+ import os
2
+ import numpy as np
3
+ import random
4
+
5
+ import torch
6
+
7
+
8
+ def set_seed(seed: int, rank: int = 0):
9
+ random.seed(seed + rank)
10
+ np.random.seed(seed + rank)
11
+ torch.manual_seed(seed + rank)
12
+ torch.cuda.manual_seed_all(seed + rank)
13
+ torch.backends.cudnn.deterministic = True
14
+ os.environ["PYTHONHASHSEED"] = str(seed + rank)
15
+
16
+ class LargeInt(int):
17
+ def __new__(cls, value):
18
+ if isinstance(value, str):
19
+ units = {"K": 1e3, "M": 1e6, "B": 1e9, "T": 1e12}
20
+ last_char = value[-1].upper()
21
+ if last_char in units:
22
+ num = float(value[:-1]) * units[last_char]
23
+ return super(LargeInt, cls).__new__(cls, int(num))
24
+ else:
25
+ return super(LargeInt, cls).__new__(cls, int(value))
26
+ else:
27
+ return super(LargeInt, cls).__new__(cls, value)
28
+
29
+ def __str__(self):
30
+ value = int(self)
31
+ if abs(value) < 1000:
32
+ return f"{value}"
33
+ for unit in ["", "K", "M", "B", "T"]:
34
+ if abs(value) < 1000:
35
+ return f"{value:.1f}{unit}"
36
+ value /= 1000
37
+ return f"{value:.1f}P" # P stands for Peta, or 10^15
38
+
39
+ def __repr__(self):
40
+ return f'"{self.__str__()}"' # Ensure repr also returns the string with quotes
41
+
42
+ def __json__(self):
43
+ return f'"{self.__str__()}"'
44
+
45
+ def __add__(self, other):
46
+ if isinstance(other, int):
47
+ return LargeInt(super().__add__(other))
48
+ return NotImplemented
49
+
50
+ def __radd__(self, other):
51
+ return self.__add__(other) # This ensures commutativity
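
A brief, hedged example of the LargeInt helper above (used elsewhere in this commit to report parameter counts):

from utils.misc import LargeInt

n_params = LargeInt("7B")          # parsed as 7_000_000_000
print(int(n_params))               # 7000000000
print(n_params)                    # 7.0B
print(LargeInt(950))               # 950 (values under 1000 are printed as-is)
print(LargeInt(1_300_000) + 1)     # 1.3M (addition with plain ints stays a LargeInt)
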
utils/model_utils.py ADDED
@@ -0,0 +1,77 @@
 
 
1
+ import torch
2
+
3
+
4
+ def expand_t(t, x):
5
+ """Function to reshape time t to broadcastable dimension of x
6
+ Args:
7
+ t: [bsz,], time vector
8
+ x: [bsz,...], data point
9
+ """
10
+ dims = [1] * (len(x.size()) - 1)
11
+ t = t.view(t.size(0), *dims)
12
+ return t
13
+
14
+
15
+ def randn_tensor(shape, noise_repeat, device, dtype=torch.float32):
16
+ bsz = shape[0]
17
+ if bsz % noise_repeat != 0:
18
+ raise ValueError(f"Batch size ({bsz}) must be divisible by noise repeat ({noise_repeat})")
19
+ _shape = (noise_repeat,) + shape[1:]
20
+ _tensor = torch.randn(_shape, device=device, dtype=dtype).repeat(bsz // noise_repeat, 1)
21
+ return _tensor
22
+
23
+
24
+ def rotate_half(x):
25
+ """Rotates half the hidden dims of the input."""
26
+ x1 = x[..., : x.shape[-1] // 2]
27
+ x2 = x[..., x.shape[-1] // 2 :]
28
+ return torch.cat((-x2, x1), dim=-1)
29
+
30
+
31
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
32
+ cos = cos.unsqueeze(unsqueeze_dim)
33
+ sin = sin.unsqueeze(unsqueeze_dim)
34
+ q_embed = (q * cos) + (rotate_half(q) * sin)
35
+ k_embed = (k * cos) + (rotate_half(k) * sin)
36
+ return q_embed, k_embed
37
+
38
+
39
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
40
+ """
41
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
42
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
43
+ """
44
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
45
+ if n_rep == 1:
46
+ return hidden_states
47
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
48
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
49
+
50
+
51
+ def identity(input: torch.Tensor, *args, **kwargs) -> torch.Tensor:
52
+ return input
53
+
54
+
55
+ def rms_norm(
56
+ input: torch.Tensor,
57
+ normalized_shape: torch.Size,
58
+ eps: float = 1e-6,
59
+ ) -> torch.Tensor:
60
+ dtype = input.dtype
61
+ input = input.to(torch.float32)
62
+ variance = input.flatten(-len(normalized_shape)).pow(2).mean(dim=-1)[(...,) + (None,) * len(normalized_shape)]
63
+ input = input * torch.rsqrt(variance + eps)
64
+ return input.to(dtype)
65
+
66
+
67
+ def layer_norm(
68
+ input: torch.Tensor,
69
+ normalized_shape: torch.Size,
70
+ eps: float = 1e-6,
71
+ ) -> torch.Tensor:
72
+ dtype = input.dtype
73
+ input = input.to(torch.float32)
74
+ mean = input.flatten(-len(normalized_shape)).mean(dim=-1)[(...,) + (None,) * len(normalized_shape)]
75
+ variance = (input - mean).flatten(-len(normalized_shape)).pow(2).mean(dim=-1)[(...,) + (None,) * len(normalized_shape)]
76
+ input = (input - mean) * torch.rsqrt(variance + eps)
77
+ return input.to(dtype)
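
A hedged illustration of repeat_kv above, the grouped-query-attention expansion also used by the Llama attention classes earlier in this commit:

import torch
from utils.model_utils import repeat_kv

batch, kv_heads, seqlen, head_dim = 2, 2, 5, 8
n_rep = 4  # num_attention_heads // num_key_value_heads

k = torch.randn(batch, kv_heads, seqlen, head_dim)
k_expanded = repeat_kv(k, n_rep)
print(k_expanded.shape)  # torch.Size([2, 8, 5, 8])

# Matches torch.repeat_interleave along the head dimension, as the docstring states.
assert torch.equal(k_expanded, torch.repeat_interleave(k, n_rep, dim=1))
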
vae/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99293255229a29297e2851858db3794497d1b0b09b20c308c1062636ea4bcdd9
3
+ size 335365010
vae/config.json ADDED
@@ -0,0 +1,15 @@
 
 
1
+ {
2
+ "resolution": 256,
3
+ "in_channels": 3,
4
+ "ch": 128,
5
+ "out_ch": 3,
6
+ "ch_mult": [1, 2, 4, 4],
7
+ "num_res_blocks": 2,
8
+ "z_channels": 16,
9
+ "shift_factor": 0,
10
+ "scaling_factor": 1,
11
+ "deterministic": true,
12
+ "norm_fn": "layer_norm",
13
+ "norm_level": "channel",
14
+ "psz": 1
15
+ }
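
The keys in vae/config.json line up one-to-one with the AutoEncoderParams dataclass declared in vae/nextstep_ae.py below, so a hedged loading sketch (assuming the repository layout of this commit) is simply:

import json
from vae.nextstep_ae import AutoEncoderParams

with open("vae/config.json") as f:
    params = AutoEncoderParams(**json.load(f))

print(params.z_channels, params.ch_mult, params.norm_fn)  # 16 [1, 2, 4, 4] layer_norm
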
vae/nextstep_ae.py ADDED
@@ -0,0 +1,527 @@
 
 
1
+ import os
2
+ import json
3
+ import inspect
4
+ from dataclasses import dataclass, field, asdict
5
+ from typing import Literal
6
+ from loguru import logger
7
+ from omegaconf import OmegaConf
8
+ from tabulate import tabulate
9
+ from einops import rearrange
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ from torch import Tensor
14
+ from torch.utils.checkpoint import checkpoint
15
+
16
+ from diffusers.models.autoencoders.vae import DecoderOutput, DiagonalGaussianDistribution
17
+ from diffusers.models.modeling_outputs import AutoencoderKLOutput
18
+
19
+ from utils.misc import LargeInt
20
+ from utils.model_utils import identity, rms_norm, layer_norm, randn_tensor, expand_t
21
+ from utils.compile_utils import smart_compile
22
+
23
+
24
+ @dataclass
25
+ class AutoEncoderParams:
26
+ resolution: int = 256
27
+ in_channels: int = 3
28
+ ch: int = 128
29
+ out_ch: int = 3
30
+ ch_mult: list[int] = field(default_factory=lambda: [1, 2, 4, 4])
31
+ num_res_blocks: int = 2
32
+ z_channels: int = 16
33
+ scaling_factor: float = 0.3611
34
+ shift_factor: float = 0.1159
35
+ deterministic: bool = False
36
+ norm_fn: Literal["layer_norm", "rms_norm"] | None = None
37
+ norm_level: Literal["latent", "channel"] = "latent"
38
+ psz: int | None = None
39
+
40
+
41
+ def swish(x: Tensor) -> Tensor:
42
+ return x * torch.sigmoid(x)
43
+
44
+
45
+ class AttnBlock(nn.Module):
46
+ def __init__(self, in_channels: int):
47
+ super().__init__()
48
+ self.in_channels = in_channels
49
+
50
+ self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
51
+
52
+ self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
53
+ self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
54
+ self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
55
+ self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)
56
+
57
+ def attention(self, h_: Tensor) -> Tensor:
58
+ h_ = self.norm(h_)
59
+ q = self.q(h_)
60
+ k = self.k(h_)
61
+ v = self.v(h_)
62
+
63
+ b, c, h, w = q.shape
64
+ q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous()
65
+ k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous()
66
+ v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous()
67
+ h_ = nn.functional.scaled_dot_product_attention(q, k, v)
68
+
69
+ return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
70
+
71
+ def forward(self, x: Tensor) -> Tensor:
72
+ return x + self.proj_out(self.attention(x))
73
+
74
+
75
+ class ResnetBlock(nn.Module):
76
+ def __init__(self, in_channels: int, out_channels: int):
77
+ super().__init__()
78
+ self.in_channels = in_channels
79
+ out_channels = in_channels if out_channels is None else out_channels
80
+ self.out_channels = out_channels
81
+
82
+ self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
83
+ self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
84
+ self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
85
+ self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
86
+ if self.in_channels != self.out_channels:
87
+ self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
88
+
89
+ def forward(self, x):
90
+ h = x
91
+ h = self.norm1(h)
92
+ h = swish(h)
93
+ h = self.conv1(h)
94
+
95
+ h = self.norm2(h)
96
+ h = swish(h)
97
+ h = self.conv2(h)
98
+
99
+ if self.in_channels != self.out_channels:
100
+ x = self.nin_shortcut(x)
101
+
102
+ return x + h
103
+
104
+
105
+ class Downsample(nn.Module):
106
+ def __init__(self, in_channels: int):
107
+ super().__init__()
108
+ # no asymmetric padding in torch conv, must do it ourselves
109
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
110
+
111
+ def forward(self, x: Tensor):
112
+ pad = (0, 1, 0, 1)
113
+ x = nn.functional.pad(x, pad, mode="constant", value=0)
114
+ x = self.conv(x)
115
+ return x
116
+
117
+
118
+ class Upsample(nn.Module):
119
+ def __init__(self, in_channels: int):
120
+ super().__init__()
121
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
122
+
123
+ def forward(self, x: Tensor):
124
+ x = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
125
+ x = self.conv(x)
126
+ return x
127
+
128
+
129
+ class Encoder(nn.Module):
+     def __init__(
+         self,
+         resolution: int,
+         in_channels: int,
+         ch: int,
+         ch_mult: list[int],
+         num_res_blocks: int,
+         z_channels: int,
+     ):
+         super().__init__()
+         self.ch = ch
+         self.num_resolutions = len(ch_mult)
+         self.num_res_blocks = num_res_blocks
+         self.resolution = resolution
+         self.in_channels = in_channels
+         # downsampling
+         self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
+
+         curr_res = resolution
+         in_ch_mult = (1,) + tuple(ch_mult)
+         self.in_ch_mult = in_ch_mult
+         self.down = nn.ModuleList()
+         block_in = self.ch
+         for i_level in range(self.num_resolutions):
+             block = nn.ModuleList()
+             attn = nn.ModuleList()
+             block_in = ch * in_ch_mult[i_level]
+             block_out = ch * ch_mult[i_level]
+             for _ in range(self.num_res_blocks):
+                 block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
+                 block_in = block_out
+             down = nn.Module()
+             down.block = block
+             down.attn = attn
+             if i_level != self.num_resolutions - 1:
+                 down.downsample = Downsample(block_in)
+                 curr_res = curr_res // 2
+             self.down.append(down)
+
+         # middle
+         self.mid = nn.Module()
+         self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+         self.mid.attn_1 = AttnBlock(block_in)
+         self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+
+         # end
+         self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
+         self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)
+
+         self.grad_checkpointing = False
+
+     @smart_compile()
+     def forward(self, x: Tensor) -> Tensor:
+         # downsampling
+         hs = [self.conv_in(x)]
+         for i_level in range(self.num_resolutions):
+             for i_block in range(self.num_res_blocks):
+                 block_fn = self.down[i_level].block[i_block]
+                 if self.grad_checkpointing:
+                     h = checkpoint(block_fn, hs[-1])
+                 else:
+                     h = block_fn(hs[-1])
+                 if len(self.down[i_level].attn) > 0:
+                     attn_fn = self.down[i_level].attn[i_block]
+                     if self.grad_checkpointing:
+                         h = checkpoint(attn_fn, h)
+                     else:
+                         h = attn_fn(h)
+                 hs.append(h)
+             if i_level != self.num_resolutions - 1:
+                 hs.append(self.down[i_level].downsample(hs[-1]))
+
+         # middle
+         h = hs[-1]
+         h = self.mid.block_1(h)
+         h = self.mid.attn_1(h)
+         h = self.mid.block_2(h)
+         # end
+         h = self.norm_out(h)
+         h = swish(h)
+         h = self.conv_out(h)
+         return h
+
+
+ class Decoder(nn.Module):
+     def __init__(
+         self,
+         ch: int,
+         out_ch: int,
+         ch_mult: list[int],
+         num_res_blocks: int,
+         in_channels: int,
+         resolution: int,
+         z_channels: int,
+     ):
+         super().__init__()
+         self.ch = ch
+         self.num_resolutions = len(ch_mult)
+         self.num_res_blocks = num_res_blocks
+         self.resolution = resolution
+         self.in_channels = in_channels
+         self.ffactor = 2 ** (self.num_resolutions - 1)
+
+         # compute in_ch_mult, block_in and curr_res at lowest res
+         block_in = ch * ch_mult[self.num_resolutions - 1]
+         curr_res = resolution // 2 ** (self.num_resolutions - 1)
+         self.z_shape = (1, z_channels, curr_res, curr_res)
+
+         # z to block_in
+         self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
+
+         # middle
+         self.mid = nn.Module()
+         self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+         self.mid.attn_1 = AttnBlock(block_in)
+         self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+
+         # upsampling
+         self.up = nn.ModuleList()
+         for i_level in reversed(range(self.num_resolutions)):
+             block = nn.ModuleList()
+             attn = nn.ModuleList()
+             block_out = ch * ch_mult[i_level]
+             for _ in range(self.num_res_blocks + 1):
+                 block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
+                 block_in = block_out
+             up = nn.Module()
+             up.block = block
+             up.attn = attn
+             if i_level != 0:
+                 up.upsample = Upsample(block_in)
+                 curr_res = curr_res * 2
+             self.up.insert(0, up)  # prepend to get consistent order
+
+         # end
+         self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
+         self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)
+
+         self.grad_checkpointing = False
+
+     @smart_compile()
+     def forward(self, z: Tensor) -> Tensor:
+         # get dtype for proper tracing
+         upscale_dtype = next(self.up.parameters()).dtype
+
+         # z to block_in
+         h = self.conv_in(z)
+
+         # middle
+         h = self.mid.block_1(h)
+         h = self.mid.attn_1(h)
+         h = self.mid.block_2(h)
+
+         # cast to proper dtype
+         h = h.to(upscale_dtype)
+         # upsampling
+         for i_level in reversed(range(self.num_resolutions)):
+             for i_block in range(self.num_res_blocks + 1):
+                 block_fn = self.up[i_level].block[i_block]
+                 if self.grad_checkpointing:
+                     h = checkpoint(block_fn, h)
+                 else:
+                     h = block_fn(h)
+                 if len(self.up[i_level].attn) > 0:
+                     attn_fn = self.up[i_level].attn[i_block]
+                     if self.grad_checkpointing:
+                         h = checkpoint(attn_fn, h)
+                     else:
+                         h = attn_fn(h)
+             if i_level != 0:
+                 h = self.up[i_level].upsample(h)
+
+         # end
+         h = self.norm_out(h)
+         h = swish(h)
+         h = self.conv_out(h)
+         return h
+
+
+ class AutoencoderKL(nn.Module):
+     def __init__(self, params: AutoEncoderParams):
+         super().__init__()
+         self.config = params
+         self.config = OmegaConf.create(asdict(self.config))
+         self.config.latent_channels = params.z_channels
+         self.config.block_out_channels = params.ch_mult
+
+         self.params = params
+         self.encoder = Encoder(
+             resolution=params.resolution,
+             in_channels=params.in_channels,
+             ch=params.ch,
+             ch_mult=params.ch_mult,
+             num_res_blocks=params.num_res_blocks,
+             z_channels=params.z_channels,
+         )
+         self.decoder = Decoder(
+             resolution=params.resolution,
+             in_channels=params.in_channels,
+             ch=params.ch,
+             out_ch=params.out_ch,
+             ch_mult=params.ch_mult,
+             num_res_blocks=params.num_res_blocks,
+             z_channels=params.z_channels,
+         )
+
+         self.psz = params.psz
+         # if self.psz is not None:
+         #     logger.warning("psz has been deprecated, this is only used for hack's vae")
+
+         if params.norm_fn is None:
+             self.norm_fn = identity
+         elif params.norm_fn == "layer_norm":
+             self.norm_fn = layer_norm
+         elif params.norm_fn == "rms_norm":
+             self.norm_fn = rms_norm
+         else:
+             raise ValueError(f"Invalid norm_fn: {params.norm_fn}")
+         self.norm_level = params.norm_level
+
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         std = 0.02
+         if isinstance(module, (nn.Conv2d, nn.Linear)):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.GroupNorm):
+             if module.weight is not None:
+                 module.weight.data.fill_(1.0)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+
+     def gradient_checkpointing_enable(self):
+         self.encoder.grad_checkpointing = True
+         self.decoder.grad_checkpointing = True
+
+     @property
+     def dtype(self):
+         return self.encoder.conv_in.weight.dtype
+
+     @property
+     def device(self):
+         return self.encoder.conv_in.weight.device
+
+     @property
+     def trainable_params(self) -> float:
+         n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
+         return LargeInt(n_params)
+
+     @property
+     def params_info(self) -> str:
+         encoder_params = str(LargeInt(sum(p.numel() for p in self.encoder.parameters())))
+         decoder_params = str(LargeInt(sum(p.numel() for p in self.decoder.parameters())))
+         table = [["encoder", encoder_params], ["decoder", decoder_params]]
+         return tabulate(table, headers=["Module", "Params"], tablefmt="grid")
+
+     def get_last_layer(self):
+         return self.decoder.conv_out.weight
+
+     def patchify(self, img: torch.Tensor):
+         """
+         img: (bsz, C, H, W)
+         x: (bsz, patch_size**2 * C, H / patch_size, W / patch_size)
+         """
+         bsz, c, h, w = img.shape
+         p = self.psz
+         h_, w_ = h // p, w // p
+
+         img = img.reshape(bsz, c, h_, p, w_, p)
+         img = torch.einsum("nchpwq->ncpqhw", img)
+         x = img.reshape(bsz, c * p**2, h_, w_)
+         return x
+
+     def unpatchify(self, x: torch.Tensor):
+         """
+         x: (bsz, patch_size**2 * C, H / patch_size, W / patch_size)
+         img: (bsz, C, H, W)
+         """
+         bsz = x.shape[0]
+         p = self.psz
+         c = self.config.latent_channels
+         h_, w_ = x.shape[2], x.shape[3]
+
+         x = x.reshape(bsz, c, p, p, h_, w_)
+         x = torch.einsum("ncpqhw->nchpwq", x)
+         img = x.reshape(bsz, c, h_ * p, w_ * p)
+         return img
+
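+     # Worked shape example for the two helpers above (illustrative comment only,
+     # not part of the model logic; assumes psz=2 and latent_channels=16):
+     # patchify maps a latent of shape (B, 16, 32, 32) to (B, 16 * 2**2, 16, 16),
+     # i.e. (B, 64, 16, 16), and unpatchify inverts it exactly, so
+     #     self.unpatchify(self.patchify(z)).shape == z.shape
+     # whenever the spatial dims of z are divisible by psz.
+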
+     def encode(self, x: torch.Tensor, return_dict: bool = True):
+         moments = self.encoder(x)
+
+         if self.norm_fn is not None:
+             mean, logvar = torch.chunk(moments, 2, dim=1)
+             if self.psz is not None:  # HACK
+                 mean = self.patchify(mean)
+             if self.norm_level == "latent":
+                 mean = self.norm_fn(mean, mean.size()[1:])
+             elif self.norm_level == "channel":
+                 mean = mean.permute(0, 2, 3, 1)  # [bsz, c, h, w] --> [bsz, h, w, c]
+                 mean = self.norm_fn(mean, mean.size()[-1:]).permute(0, 3, 1, 2)  # [bsz, h, w, c] --> [bsz, c, h, w]
+             if self.psz is not None:  # HACK
+                 mean = self.unpatchify(mean)
+             moments = torch.cat([mean, logvar], dim=1).contiguous()
+
+         posterior = DiagonalGaussianDistribution(moments, deterministic=self.params.deterministic)
+
+         if not return_dict:
+             return (posterior,)
+
+         return AutoencoderKLOutput(latent_dist=posterior)
+
+     def decode(self, z: torch.Tensor, return_dict: bool = True):
+         dec = self.decoder(z)
+
+         if not return_dict:
+             return (dec,)
+
+         return DecoderOutput(sample=dec)
+
+     def forward(
+         self,
+         input,
+         sample_posterior=True,
+         noise_strength=0.0,
+         interpolative_noise=False,
+         t_dist: Literal["uniform", "logitnormal"] = "logitnormal",
+     ):
+         posterior = self.encode(input).latent_dist
+         z = posterior.sample() if sample_posterior else posterior.mode()
+         if noise_strength > 0.0:
+             p = torch.distributions.Uniform(0, noise_strength)
+             z = z + p.sample((z.shape[0],)).reshape(-1, 1, 1, 1).to(z.device) * randn_tensor(
+                 z.shape, device=z.device, dtype=z.dtype
+             )
+         if interpolative_noise:
+             z = self.patchify(z)
+             bsz, c, h, w = z.shape
+             z = z.permute(0, 2, 3, 1)  # [bsz, h, w, c]
+             z = z.reshape(-1, c)  # [bsz * h * w, c]
+
+             if t_dist == "logitnormal":
+                 u = torch.normal(mean=0.0, std=1.0, size=(z.shape[0],))
+                 t = (1 / (1 + torch.exp(-u))).to(z)
+             elif t_dist == "uniform":
+                 t = torch.rand((z.shape[0],)).to(z)  # uniform samples in [0, 1)
+             else:
+                 raise ValueError(f"Invalid t_dist: {t_dist}")
+
+             noise = torch.randn_like(z)
+             z = expand_t(t, z) * z + (1 - expand_t(t, z)) * noise
+
+             z = z.reshape(bsz, h, w, c).permute(0, 3, 1, 2)
+             z = self.unpatchify(z)
+
+         dec = self.decode(z).sample
+         return dec, posterior
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: str = "flux-vae", **kwargs):
+         config_path = None
+         ckpt_path = pretrained_model_name_or_path
+         if ckpt_path is not None and os.path.isdir(ckpt_path):
+             config_path = os.path.join(ckpt_path, "config.json")
+             ckpt_path = os.path.join(ckpt_path, "checkpoint.pt")
+         state_dict = torch.load(ckpt_path, map_location="cpu") if ckpt_path is not None else None
+
+         if kwargs is None:
+             kwargs = {}
+
+         if config_path is not None:
+             with open(config_path, "r") as f:
+                 config: dict = json.load(f)
+             config.update(kwargs)
+             kwargs = config
+
+         # Filter out kwargs that are not in AutoEncoderParams
+         # This ensures we only pass parameters that the model can accept
+         valid_kwargs = {}
+         param_signature = inspect.signature(AutoEncoderParams.__init__).parameters
+         for key, value in kwargs.items():
+             if key in param_signature:
+                 valid_kwargs[key] = value
+             else:
+                 logger.info(f"Ignoring parameter '{key}' as it's not defined in AutoEncoderParams")
+
+         params = AutoEncoderParams(**valid_kwargs)
+         model = cls(params)
+         try:
+             msg = model.load_state_dict(state_dict, strict=False)
+             logger.info(f"Loaded state_dict from {ckpt_path}")
+             logger.info(f"Missing keys:\n{msg.missing_keys}")
+             logger.info(f"Unexpected keys:\n{msg.unexpected_keys}")
+         except Exception as e:
+             logger.error(e)
+             logger.warning(f"Failed to load state_dict from {ckpt_path}, using random initialization")
+         return model
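
A minimal usage sketch for the AutoencoderKL defined above (not part of the uploaded file). It assumes a local checkpoint directory laid out the way from_pretrained expects, i.e. containing config.json and checkpoint.pt; the directory name and input sizes are placeholders.

import torch

# Hypothetical checkpoint directory with config.json and checkpoint.pt inside.
vae = AutoencoderKL.from_pretrained("path/to/vae_checkpoint").eval()

with torch.no_grad():
    # Dummy RGB batch; real inputs would be preprocessed to match training.
    img = torch.randn(1, 3, 256, 256, device=vae.device, dtype=vae.dtype)
    posterior = vae.encode(img).latent_dist   # DiagonalGaussianDistribution
    z = posterior.sample()                    # latent with z_channels channels, spatially downsampled
    recon = vae.decode(z).sample              # reconstruction with the same shape as img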