hp-l33 committed
Commit d09f0be · 1 Parent(s): ef784a3

Add pipeline

Files changed (3)
  1. arpg.py +636 -0
  2. pipeline.py +111 -0
  3. vq_model.py +459 -0
arpg.py ADDED
@@ -0,0 +1,636 @@
+# Modified from:
+#   LlamaGen: https://github.com/FoundationVision/LlamaGen/
+#   YOCO: https://github.com/microsoft/unilm/tree/master/YOCO
+
+import math
+import numpy as np
+import torch
+import torch.nn as nn
+
+from torch.nn import functional as F
+from einops import rearrange
+from typing import Dict, List, Optional
+from dataclasses import dataclass
+from transformers.configuration_utils import PretrainedConfig
+
+
+def find_multiple(n: int, k: int):
+    if n % k == 0:
+        return n
+    return n + k - (n % k)
+
+
+def batch_seq_shuffle(x, orders=None):
+    assert x.ndim >= 2, "The input should contain at least two dimensions, batch and length"
+    bs, seq_len = x.shape[:2]
+
+    if orders is None:
+        orders = torch.rand(bs, seq_len, device=x.device).argsort(dim=1)
+
+    orders_expand = orders.view(*orders.shape, *(1,) * (x.ndim - orders.ndim))
+    shuffled_data = torch.gather(x, 1, orders_expand.expand(*x.shape))
+
+    return shuffled_data, orders
+
+
+# @dataclass
+class ModelArgs(PretrainedConfig):
+    def __init__(
+        self,
+        dim: int = 4096,
+        n_layer: int = 32,
+        n_head: int = 32,
+        multiple_of: int = 256,  # make SwiGLU hidden layer size multiple of large power of 2
+        ffn_dim_multiplier: Optional[float] = None,
+        rope_base: float = 10000,
+        norm_eps: float = 1e-5,
+        initializer_range: float = 0.02,
+        token_dropout_p: float = 0.1,
+        attn_dropout_p: float = 0.0,
+        resid_dropout_p: float = 0.1,
+        ffn_dropout_p: float = 0.1,
+        drop_path_rate: float = 0.0,
+        num_classes: int = 1000,
+        class_dropout_prob: float = 0.1,
+        model_type: str = 'c2i',
+        vocab_size: int = 16384,
+        cls_token_num: int = 1,
+        block_size: int = 256,
+    ):
+        self.dim = dim
+        self.n_layer = n_layer
+        self.n_head = n_head
+        self.multiple_of = multiple_of
+        self.ffn_dim_multiplier = ffn_dim_multiplier
+        self.rope_base = rope_base
+        self.norm_eps = norm_eps
+        self.initializer_range = initializer_range
+
+        self.token_dropout_p = token_dropout_p
+        self.attn_dropout_p = attn_dropout_p
+        self.resid_dropout_p = resid_dropout_p
+        self.ffn_dropout_p = ffn_dropout_p
+        self.drop_path_rate = drop_path_rate
+
+        self.num_classes = num_classes
+        self.class_dropout_prob = class_dropout_prob
+        self.model_type = model_type
+        self.vocab_size = vocab_size
+        self.cls_token_num = cls_token_num
+        self.block_size = block_size
+
+
+class RMSNorm(torch.nn.Module):
+    def __init__(self, dim: int, eps: float = 1e-5):
+        super().__init__()
+        self.eps = eps
+        self.weight = nn.Parameter(torch.ones(dim))
+
+    def _norm(self, x):
+        return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
+
+    def forward(self, x):
+        output = self._norm(x.float()).type_as(x)
+        return output * self.weight
+
+
+class FeedForward(nn.Module):
+    def __init__(self, config: ModelArgs):
+        super().__init__()
+        hidden_dim = 4 * config.dim
+        hidden_dim = int(2 * hidden_dim / 3)
+        # custom dim factor multiplier
+        if config.ffn_dim_multiplier is not None:
+            hidden_dim = int(config.ffn_dim_multiplier * hidden_dim)
+        hidden_dim = find_multiple(hidden_dim, config.multiple_of)
+
+        self.w1 = nn.Linear(config.dim, hidden_dim, bias=False)
+        self.w3 = nn.Linear(config.dim, hidden_dim, bias=False)
+        self.w2 = nn.Linear(hidden_dim, config.dim, bias=False)
+        self.ffn_dropout = nn.Dropout(config.ffn_dropout_p)
+
+    def forward(self, x):
+        return self.ffn_dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
+
+
+class Attention(nn.Module):
+    def __init__(self, config: ModelArgs):
+        super().__init__()
+        assert config.dim % config.n_head == 0
+        self.dim = config.dim
+        self.n_head = config.n_head
+        self.head_dim = config.dim // config.n_head
+
+        self.to_q = nn.Linear(config.dim, config.dim, bias=False)
+        self.to_k = nn.Linear(config.dim, config.dim, bias=False)
+        self.to_v = nn.Linear(config.dim, config.dim, bias=False)
+
+        self.proj = nn.Linear(config.dim, config.dim, bias=False)
+
+        self.attn_drop = config.attn_dropout_p
+        self.proj_drop = nn.Dropout(config.resid_dropout_p)
+
+        self.kv_cache = False
+        self.k_cache = None
+        self.v_cache = None
+
+    def reset_kv_cache(self):
+        self.k_cache = None
+        self.v_cache = None
+
+    def update_kv_cache(self, k: torch.Tensor, v: torch.Tensor):
+        if self.k_cache is None and self.v_cache is None:
+            k_cache = k
+            v_cache = v
+        else:
+            k_cache = torch.cat([self.k_cache, k], dim=-2)
+            v_cache = torch.cat([self.v_cache, v], dim=-2)
+
+        self.k_cache = k_cache
+        self.v_cache = v_cache
+
+        return k_cache, v_cache
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        freqs_cis: torch.Tensor = None
+    ):
+
+        q, k, v = self.to_q(x), self.to_k(x), self.to_v(x)
+        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=self.n_head), (q, k, v))
+
+        q = apply_rotary_emb(q, freqs_cis)
+        k = apply_rotary_emb(k, freqs_cis)
+
+        q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v))
+
+        if self.kv_cache:
+            k, v = self.update_kv_cache(k, v)
+
+        output = F.scaled_dot_product_attention(
+            q, k, v,
+            attn_mask=None,
+            is_causal=True if self.training else False,
+            dropout_p=self.attn_drop if self.training else 0
+        )
+        output = rearrange(output, 'b h n d -> b n (h d)').contiguous()
+        output = self.proj_drop(self.proj(output))
+        return output
+
+
+class CrossAttention(nn.Module):
+    def __init__(self, config: ModelArgs):
+        super().__init__()
+        assert config.dim % config.n_head == 0
+        self.dim = config.dim
+        self.n_head = config.n_head
+        self.head_dim = config.dim // config.n_head
+
+        self.to_q = nn.Linear(config.dim, config.dim, bias=False)
+
+        self.proj = nn.Linear(config.dim, config.dim, bias=False)
+
+        self.attn_drop = config.attn_dropout_p
+        self.proj_drop = nn.Dropout(config.resid_dropout_p)
+
+        self.kv_cache = False
+        self.k_cache = None
+        self.v_cache = None
+
+    def reset_kv_cache(self):
+        self.k_cache = None
+        self.v_cache = None
+
+    def update_kv_cache(self, k: torch.Tensor, v: torch.Tensor):
+        if self.k_cache is None and self.v_cache is None:
+            k_cache = k
+            v_cache = v
+        else:
+            k_cache = torch.cat([self.k_cache, k], dim=-2)
+            v_cache = torch.cat([self.v_cache, v], dim=-2)
+
+        self.k_cache = k_cache
+        self.v_cache = v_cache
+
+        return k_cache, v_cache
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        k: torch.Tensor,
+        v: torch.Tensor,
+        freqs_cis: torch.Tensor = None
+    ):
+        q = self.to_q(x)
+        q = rearrange(q, 'b n (h d) -> b n h d', h=self.n_head)
+
+        # target-aware
+        q = apply_rotary_emb(q, freqs_cis[:, -q.shape[1]:, ...])
+
+        q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v))
+
+        if self.kv_cache:
+            k, v = self.update_kv_cache(k, v)
+
+        output = F.scaled_dot_product_attention(
+            q, k, v,
+            attn_mask=None,
+            is_causal=True if self.training else False,
+            dropout_p=self.attn_drop if self.training else 0
+        )
+        output = rearrange(output, 'b h n d -> b n (h d)').contiguous()
+        output = self.proj_drop(self.proj(output))
+        return output
+
+
+class SelfDecoder(nn.Module):
+    def __init__(self, config: ModelArgs):
+        super().__init__()
+        self.attn = Attention(config)
+        self.ffn = FeedForward(config)
+
+        self.attn_norm = RMSNorm(config.dim, eps=config.norm_eps)
+        self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        freqs_cis: torch.Tensor = None
+    ):
+        h = x + self.attn(x=self.attn_norm(x), freqs_cis=freqs_cis[:, :x.shape[1], ...])
+        out = h + self.ffn(self.ffn_norm(h))
+
+        return out
+
+
+class CrossDecoder(nn.Module):
+    def __init__(self, config: ModelArgs):
+        super().__init__()
+        self.attn = CrossAttention(config)
+        self.ffn = FeedForward(config)
+
+        self.attn_norm = RMSNorm(config.dim, eps=config.norm_eps)
+        self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        k: torch.Tensor,
+        v: torch.Tensor,
+        freqs_cis: torch.Tensor = None
+    ):
+        h = x + self.attn(x=self.attn_norm(x), k=k, v=v, freqs_cis=freqs_cis)
+        out = h + self.ffn(self.ffn_norm(h))
+
+        return out
+
+
+class Decoder_Decoder(nn.Module):
+    def __init__(self, config: ModelArgs, n_layer):
+        super().__init__()
+        self.config = config
+        self.self_dec = nn.ModuleList([SelfDecoder(config) for _ in range(n_layer//2)])
+        self.cross_dec = nn.ModuleList([CrossDecoder(config) for _ in range(n_layer//2)])
+
+        self.norm = RMSNorm(config.dim, eps=config.norm_eps)
+        self.to_k = nn.Linear(config.dim, config.dim, bias=False)
+        self.to_v = nn.Linear(config.dim, config.dim, bias=False)
+
+        self.kv_cache = False
+        self.k_cache = None
+        self.v_cache = None
+
+    def reset_kv_cache(self):
+        self.k_cache = None
+        self.v_cache = None
+
+    def update_kv_cache(self, k: torch.Tensor, v: torch.Tensor, head_first=False):
+        t_dim = 2 if head_first else 1
+
+        if self.k_cache is None and self.v_cache is None:
+            k_cache = k
+            v_cache = v
+        else:
+            k_cache = torch.cat([self.k_cache, k], dim=t_dim)
+            v_cache = torch.cat([self.v_cache, v], dim=t_dim)
+
+        self.k_cache = k_cache
+        self.v_cache = v_cache
+
+        return k_cache, v_cache
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        q: torch.Tensor,
+        freqs_cis: torch.Tensor = None
+    ):
+        for layer in self.self_dec:
+            x = layer(x=x, freqs_cis=freqs_cis)
+
+        x_norm = self.norm(x)
+        k = self.to_k(x_norm)
+        v = self.to_v(x_norm)
+
+        k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=self.config.n_head), (k, v))
+        k = apply_rotary_emb(k, freqs_cis[:, :k.shape[1], ...])
+
+        if self.kv_cache:
+            k, v = self.update_kv_cache(k, v)
+
+        for layer in self.cross_dec:
+            q = layer(x=q, k=k, v=v, freqs_cis=freqs_cis)
+
+        return q
+
+
+class Transformer(nn.Module):
+    def __init__(self, config: ModelArgs):
+        super().__init__()
+        self.config = config
+        self.image_seq_len = config.block_size
+
+        """
+        ref: https://github.com/bytedance/1d-tokenizer/blob/main/modeling/rar.py
+        Token space:
+        [0, vocab_size - 1]                        : the learned quantized image tokens
+        [vocab_size]                               : the mask token id
+        [vocab_size + 1, vocab_size + num_classes] : the imagenet class tokens
+        [vocab_size + num_classes + 1]             : the class drop label
+        [vocab_size + num_classes + 2]             : the drop token for scg
+        """
+        self.embeddings = nn.Embedding(config.vocab_size + 1 + config.num_classes + 1 + 1, config.dim)
+        self.embed_drop = nn.Dropout(config.token_dropout_p)
+
+        self.mask_token_id = config.vocab_size
+        self.none_conds_id = config.vocab_size + config.num_classes + 1
+        self.none_token_id = config.vocab_size + config.num_classes + 2
+
+        # 2-pass decoder
+        self.layers = Decoder_Decoder(config, config.n_layer)
+
+        # output layer
+        self.norm = RMSNorm(config.dim, eps=config.norm_eps)
+        self.head = nn.Linear(config.dim, config.vocab_size, bias=False)
+
+        # 2d rotary pos embedding
+        grid_size = int(self.image_seq_len ** 0.5)
+        self.freqs_cis = precompute_freqs_cis_2d(grid_size, config.dim // config.n_head, config.rope_base, config.cls_token_num)
+
+        self.initialize_weights()
+
+    def initialize_weights(self):
+        # Initialize nn.Linear and nn.Embedding
+        self.apply(self._init_weights)
+
+        # Zero-out output layers:
+        nn.init.constant_(self.head.weight, 0)
+
+    def _init_weights(self, module):
+        std = self.config.initializer_range
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=std)
+
+    def setup_kv_cache(self, enable=True):
+        for block in self.layers.self_dec:
+            block.attn.kv_cache = enable
+            block.attn.reset_kv_cache()
+
+        self.layers.kv_cache = enable
+        self.layers.reset_kv_cache()
+
+    def preprocess_condition(self, condition, cond_drop_prob=0.0):
+        # Set class condition to None condition
+        drop_label_mask = torch.rand_like(condition, dtype=torch.float) < cond_drop_prob
+        condition = condition + self.config.vocab_size + 1  # [0, 999] -> [codebook_size + 1, codebook_size + 999]
+        condition[drop_label_mask] = self.none_conds_id
+
+        if condition.ndim == 1:
+            condition = condition.unsqueeze(-1)
+
+        return condition
+
+    def forward_shared(self, input_ids, freqs_cis, num_query=None):
+        embedds = self.embeddings(input_ids)
+
+        x = self.embed_drop(embedds)
+        num_query = input_ids.shape[-1] if num_query == None else num_query
+        queries = self.embeddings(torch.full((input_ids.shape[0], num_query), self.mask_token_id, device=input_ids.device))
+
+        x = self.layers(x, queries, freqs_cis=freqs_cis)
+        logits = self.head(self.norm(x)).float()
+
+        return logits
+
+    def forward(self, input_ids, condition, targets=None, debug=False):
+        # shift class id and dropout for classifier-free guidance
+        condition = self.preprocess_condition(condition, cond_drop_prob=self.config.class_dropout_prob)
+
+        # shuffle input
+        shuffled_ids, orders = batch_seq_shuffle(input_ids)
+
+        # shuffle RoPE
+        freqs_cis = self.freqs_cis.unsqueeze(0).repeat(input_ids.shape[0], 1, 1, 1).to(input_ids.device)
+        fixed_freqs_cis = freqs_cis[:, :1, ...]
+        shuffled_freqs_cis = batch_seq_shuffle(freqs_cis[:, 1:, ...], orders)[0]
+        freqs_cis = torch.cat([fixed_freqs_cis, shuffled_freqs_cis], dim=1)
+
+        # teacher-forcing input
+        logits = self.forward_shared(torch.cat([condition, shuffled_ids[:, :-1]], dim=-1), freqs_cis)
+
+        loss = None
+        if targets is not None:
+            targets = batch_seq_shuffle(targets, orders)[0]
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
+
+        return logits, loss
+
+    @torch.inference_mode()
+    def generate(
+        self,
+        condition,
+        guidance_scale=4.0,
+        cfg_schedule='linear',
+        sample_schedule='arccos',
+        temperature=1.0,
+        top_k=0,
+        top_p=1,
+        seq_len=256,
+        num_iter=64,
+    ):
+        device = condition.device
+        num_samples = condition.shape[0]
+        freqs_cis_ = self.freqs_cis.unsqueeze(0).to(device)
+
+        # shift condition id
+        condition = self.preprocess_condition(condition, cond_drop_prob=0.0)
+
+        # generate a random order
+        orders = torch.rand(256, device=device).argsort(dim=0) + 1
+
+        last_pos = 0
+        last_range = range(0, 1)  # for class token, hardcode
+        sequences = []
+
+        self.setup_kv_cache(enable=True)
+        for step in range(num_iter):
+            if sample_schedule == 'arccos':
+                mask_ratio = np.arccos(1. * (step + 1) / num_iter) / (math.pi * 0.5)
+            elif sample_schedule == 'cosine':
+                mask_ratio = np.cos(math.pi / 2. * (step + 1) / num_iter)
+            else:
+                raise NotImplementedError
+
+            mask_len = int(seq_len * mask_ratio)
+            mask_len = max(1, min(seq_len - last_pos - 1, mask_len))
+
+            num_pred = seq_len - last_pos - mask_len
+            if step == num_iter - 1:
+                num_pred = seq_len - last_pos
+
+            next_range = orders[range(last_pos, last_pos + num_pred)]
+            last_pos += num_pred
+
+            if cfg_schedule == 'linear':
+                cfg_scale = 1.0 + (guidance_scale - 1.0) * last_pos / seq_len
+            elif cfg_schedule == 'constant':
+                cfg_scale = guidance_scale
+            else:
+                raise NotImplementedError
+
+            """
+            1. Since the cached key has already had rotary embedding applied,
+            we only need to input the current position's frequencies for key.
+            2. We need the next position's frequencies for query to achieve target-aware guidance.
+            """
+            freqs_cis = torch.cat([
+                freqs_cis_[:, last_range, ...],
+                freqs_cis_[:, next_range, ...]], dim=1
+            )
+            if guidance_scale != 0:
+                if step == 0:
+                    input_ids = torch.cat([condition, torch.full_like(condition, self.none_conds_id)], dim=0)
+                else:
+                    input_ids = torch.cat([sequences[-1], sequences[-1]], dim=0)
+
+                logits = self.forward_shared(input_ids, freqs_cis, num_pred)
+                cond_logits, uncond_logits = logits[:num_samples], logits[num_samples:]
+                logits = uncond_logits + (cond_logits - uncond_logits) * cfg_scale
+            else:
+                raise NotImplementedError
+
+            # keep the logits of last n-tokens
+            logits = logits[:, -num_pred:] / max(temperature, 1e-5)
+
+            if top_k > 0 or top_p < 1.0:
+                logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
+
+            probs = F.softmax(logits, dim=-1)
+            sampled = torch.multinomial(probs.flatten(0, 1), num_samples=1)
+            sequences.append(sampled.reshape(num_samples, -1))
+
+            last_range = next_range
+
+        self.setup_kv_cache(enable=False)
+
+        sequences = torch.cat(sequences, dim=-1)
+        return sequences[:, orders.argsort(dim=0)]
+
+
+# https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
+def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000, cls_token_num=120):
+    freqs = 1.0 / (base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem))
+    t = torch.arange(seq_len, device=freqs.device)
+    freqs = torch.outer(t, freqs)  # (seq_len, head_dim // 2)
+    freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
+    cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1)  # (cls_token_num+seq_len, head_dim // 2, 2)
+    cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache])  # (cls_token_num+seq_len, head_dim // 2, 2)
+    return cond_cache
+
+
+def precompute_freqs_cis_2d(grid_size: int, n_elem: int, base: int = 10000, cls_token_num=120):
+    # split the dimension into half, one for x and one for y
+    half_dim = n_elem // 2
+    freqs = 1.0 / (base ** (torch.arange(0, half_dim, 2)[: (half_dim // 2)].float() / half_dim))
+    t = torch.arange(grid_size, device=freqs.device)
+    freqs = torch.outer(t, freqs)  # (grid_size, head_dim // 2)
+    freqs_grid = torch.concat([
+        freqs[:, None, :].expand(-1, grid_size, -1),
+        freqs[None, :, :].expand(grid_size, -1, -1),
+    ], dim=-1)  # (grid_size, grid_size, head_dim // 2)
+    cache_grid = torch.stack([torch.cos(freqs_grid), torch.sin(freqs_grid)], dim=-1)  # (grid_size, grid_size, head_dim // 2, 2)
+    cache = cache_grid.flatten(0, 1)
+    cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache])  # (cls_token_num+grid_size**2, head_dim // 2, 2)
+    return cond_cache
+
+
+def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor):
+    # x: (bs, seq_len, n_head, head_dim)
+    # freqs_cis (seq_len, head_dim // 2, 2)
+    xshaped = x.float().reshape(*x.shape[:-1], -1, 2)  # (bs, seq_len, n_head, head_dim//2, 2)
+    freqs_cis = freqs_cis.view(-1, xshaped.size(1), 1, xshaped.size(3), 2)  # (1, seq_len, 1, head_dim//2, 2)
+    x_out2 = torch.stack([
+        xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
+        xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
+    ], dim=-1)
+    x_out2 = x_out2.flatten(3)
+    return x_out2.type_as(x)
+
+
+def top_k_top_p_filtering(
+    logits,
+    top_k: int = 0,
+    top_p: float = 1.0,
+    filter_value: float = -float("Inf"),
+    min_tokens_to_keep: int = 1,
+):
+    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
+    Args:
+        logits: logits distribution shape (batch size, vocabulary size)
+        if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
+        if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
+            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
+    Make sure we keep at least min_tokens_to_keep per batch example in the output
+    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+    """
+    if top_k > 0:
+        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
+        # Remove all tokens with a probability less than the last token of the top-k
+        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+        logits[indices_to_remove] = filter_value
+
+    if top_p < 1.0:
+        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+        # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
+        sorted_indices_to_remove = cumulative_probs > top_p
+        if min_tokens_to_keep > 1:
+            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
+            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
+        # Shift the indices to the right to keep also the first token above the threshold
+        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+        sorted_indices_to_remove[..., 0] = 0
+
+        # scatter sorted tensors to original indexing
+        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
+        logits[indices_to_remove] = filter_value
+    return logits
+
+
+def ARPG_XXL(**kwargs):
+    return Transformer(ModelArgs(n_layer=48, n_head=24, dim=1536, **kwargs))
+
+def ARPG_XL(**kwargs):
+    return Transformer(ModelArgs(n_layer=36, n_head=20, dim=1280, **kwargs))
+
+def ARPG_L(**kwargs):
+    return Transformer(ModelArgs(n_layer=24, n_head=16, dim=1024, **kwargs))
+
+
+ARPG_models = {'ARPG-L': ARPG_L, 'ARPG-XL': ARPG_XL, 'ARPG-XXL': ARPG_XXL}
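
For orientation, a minimal usage sketch of the module above (not part of the commit): it builds the ARPG-L config from ARPG_models and samples class-conditional token sequences with generate(). Weights are random here; checkpoint loading is handled by pipeline.py below.

import torch
from arpg import ARPG_models

# Smallest config defined above (n_layer=24, n_head=16, dim=1024).
model = ARPG_models['ARPG-L'](num_classes=1000, vocab_size=16384).eval()

# Two ImageNet class ids used as conditioning tokens.
class_ids = torch.tensor([207, 360], dtype=torch.long)

# generate() is already wrapped in torch.inference_mode(); it returns one
# 256-token sequence of VQ codebook indices per sample.
tokens = model.generate(
    condition=class_ids,
    guidance_scale=4.0,
    cfg_schedule='linear',
    sample_schedule='arccos',
    temperature=1.0,
    top_k=600,
    seq_len=256,
    num_iter=64,
)
print(tokens.shape)  # torch.Size([2, 256])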
pipeline.py ADDED
@@ -0,0 +1,111 @@
+from diffusers import DiffusionPipeline
+import torch
+import numpy as np
+import importlib.util
+import sys
+from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
+import os
+from torchvision.utils import save_image
+from PIL import Image
+from safetensors.torch import load_file
+from .vq_model import VQ_models
+from .arpg import ARPG_models
+
+# inheriting from DiffusionPipeline for HF
+class ARPGModel(DiffusionPipeline):
+
+    def __init__(self):
+        super().__init__()
+
+    @torch.no_grad()
+    def __call__(self, *args, **kwargs):
+        """
+        This method downloads the model and VAE components,
+        then executes the forward pass based on the user's input.
+        """
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        # init the mar model architecture
+        model_type = kwargs.get("model_type", "ARPG-XXL")
+
+        # download the pretrained model and set diffloss parameters
+        if model_type == "ARPG-L":
+            model_path = "arpg_300m.pt"
+        elif model_type == "ARPG-XL":
+            model_path = "arpg_700m.pt"
+        elif model_type == "ARPG-XXL":
+            model_path = "arpg_1b.pt"
+        else:
+            raise NotImplementedError
+        # download and load the model weights (.safetensors or .pth)
+        model_checkpoint_path = hf_hub_download(
+            repo_id=kwargs.get("repo_id", "hp-l33/ARPG"),
+            filename=kwargs.get("model_filename", model_path)
+        )
+
+        model_fn = ARPG_models[model_type]
+
+        model = model_fn(
+            num_classes=1000,
+            vocab_size=16384
+        ).cuda()
+
+        # use safetensors
+        state_dict = load_file(model_checkpoint_path)['state_dict']
+        model.load_state_dict(state_dict)
+        model.eval()
+
+        # download and load the vae
+        vae_checkpoint_path = hf_hub_download(
+            repo_id=kwargs.get("repo_id", "FoundationVision/LlamaGen"),
+            filename=kwargs.get("vae_filename", "vq_ds16_c2i.pt")
+        )
+
+        vae = VQ_models['VQ-16']()
+
+        vae_state_dict = load_file(vae_checkpoint_path)['model']
+        vae.load_state_dict(vae_state_dict)
+        vae = vae.to(device).eval()
+
+        # set up user-specified or default values for generation
+        seed = kwargs.get("seed", 6)
+        torch.manual_seed(seed)
+        np.random.seed(seed)
+
+        num_steps = kwargs.get("num_steps", 64)
+        cfg_scale = kwargs.get("cfg_scale", 4)
+        cfg_schedule = kwargs.get("cfg_schedule", "constant")
+        sample_schedule = kwargs.get("sample_schedule", "arccos")
+        temperature = kwargs.get("temperature", 1.0)
+        top_k = kwargs.get("top_k", 600)
+        class_labels = kwargs.get("class_labels", [207, 360, 388, 113, 355, 980, 323, 979])
+
+        # generate the tokens and images
+        with torch.cuda.amp.autocast():
+            sampled_tokens = model.generate(
+                condition=torch.Tensor(class_labels).long().cuda(),
+                num_iter=num_steps,
+                guidance_scale=cfg_scale,
+                cfg_schedule=cfg_schedule,
+                sample_schedule=sample_schedule,
+                temperature=temperature,
+                top_k=top_k,
+            )
+            sampled_images = vae.decode_code(sampled_tokens, shape=(len(class_labels), 8, 16, 16))
+
+        output_dir = kwargs.get("output_dir", "./")
+        os.makedirs(output_dir, exist_ok=True)
+
+        # save the images
+        image_path = os.path.join(output_dir, "sampled_image.png")
+        samples_per_row = kwargs.get("samples_per_row", 4)
+
+        save_image(
+            torch.clamp(127.5 * sampled_images + 128.0, 0, 255), image_path, nrow=int(samples_per_row), normalize=False
+        )
+
+        # return as a pil image
+        image = Image.open(image_path)
+
+        return image
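
A minimal driving sketch for the pipeline (not part of the commit). It assumes the three files in this commit are importable as a package (the relative imports above require that) and that a CUDA device is available, since __call__ moves the model to the GPU; repo ids and filenames are the defaults hard-coded above.

from pipeline import ARPGModel  # adjust the import to wherever the package lives

pipe = ARPGModel()
image = pipe(
    model_type="ARPG-XL",                 # downloads arpg_700m.pt from hp-l33/ARPG
    num_steps=64,
    cfg_scale=4.0,
    cfg_schedule="constant",
    class_labels=[207, 360, 388, 113],
    output_dir="./samples",
)
image.save("./samples/grid.png")          # __call__ also writes sampled_image.png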
vq_model.py ADDED
@@ -0,0 +1,459 @@
+# Modified from:
+#   taming-transformers: https://github.com/CompVis/taming-transformers
+#   maskgit: https://github.com/google-research/maskgit
+
+from dataclasses import dataclass, field
+from typing import List
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from einops import rearrange, reduce
+
+
+@dataclass
+class ModelArgs:
+    codebook_size: int = 16384
+    codebook_embed_dim: int = 8
+    codebook_l2_norm: bool = True
+    codebook_show_usage: bool = True
+    commit_loss_beta: float = 0.25
+    entropy_loss_ratio: float = 0.0
+
+    encoder_ch_mult: List[int] = field(default_factory=lambda: [1, 1, 2, 2, 4])
+    decoder_ch_mult: List[int] = field(default_factory=lambda: [1, 1, 2, 2, 4])
+    z_channels: int = 256
+    dropout_p: float = 0.0
+    num_res_blocks: int = 4
+
+
+
+class VQModel(nn.Module):
+    def __init__(self, config: ModelArgs):
+        super().__init__()
+        self.config = config
+        self.encoder = Encoder(ch_mult=config.encoder_ch_mult, z_channels=config.z_channels, dropout=config.dropout_p)
+        self.decoder = Decoder(ch_mult=config.decoder_ch_mult, z_channels=config.z_channels, dropout=config.dropout_p)
+
+        self.quantize = VectorQuantizer(config.codebook_size, config.codebook_embed_dim,
+                                        config.commit_loss_beta, config.entropy_loss_ratio,
+                                        config.codebook_l2_norm, config.codebook_show_usage)
+        self.quant_conv = nn.Conv2d(config.z_channels, config.codebook_embed_dim, 1)
+        self.post_quant_conv = nn.Conv2d(config.codebook_embed_dim, config.z_channels, 1)
+
+    def encode(self, x):
+        h = self.encoder(x)
+        h = self.quant_conv(h)
+        quant, emb_loss, info = self.quantize(h)
+        return quant, emb_loss, info
+
+    def decode(self, quant):
+        quant = self.post_quant_conv(quant)
+        dec = self.decoder(quant)
+        return dec
+
+    def decode_code(self, code_b, shape=None, channel_first=True):
+        quant_b = self.quantize.get_codebook_entry(code_b, shape, channel_first)
+        dec = self.decode(quant_b)
+        return dec
+
+    def forward(self, input):
+        quant, diff, _ = self.encode(input)
+        dec = self.decode(quant)
+        return dec, diff
+
+
+class Encoder(nn.Module):
+    def __init__(self, in_channels=3, ch=128, ch_mult=(1,1,2,2,4), num_res_blocks=2,
+                 norm_type='group', dropout=0.0, resamp_with_conv=True, z_channels=256):
+        super().__init__()
+        self.num_resolutions = len(ch_mult)
+        self.num_res_blocks = num_res_blocks
+        self.conv_in = nn.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1)
+
+        # downsampling
+        in_ch_mult = (1,) + tuple(ch_mult)
+        self.conv_blocks = nn.ModuleList()
+        for i_level in range(self.num_resolutions):
+            conv_block = nn.Module()
+            # res & attn
+            res_block = nn.ModuleList()
+            attn_block = nn.ModuleList()
+            block_in = ch*in_ch_mult[i_level]
+            block_out = ch*ch_mult[i_level]
+            for _ in range(self.num_res_blocks):
+                res_block.append(ResnetBlock(block_in, block_out, dropout=dropout, norm_type=norm_type))
+                block_in = block_out
+                if i_level == self.num_resolutions - 1:
+                    attn_block.append(AttnBlock(block_in, norm_type))
+            conv_block.res = res_block
+            conv_block.attn = attn_block
+            # downsample
+            if i_level != self.num_resolutions-1:
+                conv_block.downsample = Downsample(block_in, resamp_with_conv)
+            self.conv_blocks.append(conv_block)
+
+        # middle
+        self.mid = nn.ModuleList()
+        self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
+        self.mid.append(AttnBlock(block_in, norm_type=norm_type))
+        self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
+
+        # end
+        self.norm_out = Normalize(block_in, norm_type)
+        self.conv_out = nn.Conv2d(block_in, z_channels, kernel_size=3, stride=1, padding=1)
+
+
+    def forward(self, x):
+        h = self.conv_in(x)
+        # downsampling
+        for i_level, block in enumerate(self.conv_blocks):
+            for i_block in range(self.num_res_blocks):
+                h = block.res[i_block](h)
+                if len(block.attn) > 0:
+                    h = block.attn[i_block](h)
+            if i_level != self.num_resolutions - 1:
+                h = block.downsample(h)
+
+        # middle
+        for mid_block in self.mid:
+            h = mid_block(h)
+
+        # end
+        h = self.norm_out(h)
+        h = nonlinearity(h)
+        h = self.conv_out(h)
+        return h
+
+
+class Decoder(nn.Module):
+    def __init__(self, z_channels=256, ch=128, ch_mult=(1,1,2,2,4), num_res_blocks=2, norm_type="group",
+                 dropout=0.0, resamp_with_conv=True, out_channels=3):
+        super().__init__()
+        self.num_resolutions = len(ch_mult)
+        self.num_res_blocks = num_res_blocks
+
+        block_in = ch*ch_mult[self.num_resolutions-1]
+        # z to block_in
+        self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
+
+        # middle
+        self.mid = nn.ModuleList()
+        self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
+        self.mid.append(AttnBlock(block_in, norm_type=norm_type))
+        self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
+
+        # upsampling
+        self.conv_blocks = nn.ModuleList()
+        for i_level in reversed(range(self.num_resolutions)):
+            conv_block = nn.Module()
+            # res & attn
+            res_block = nn.ModuleList()
+            attn_block = nn.ModuleList()
+            block_out = ch*ch_mult[i_level]
+            for _ in range(self.num_res_blocks + 1):
+                res_block.append(ResnetBlock(block_in, block_out, dropout=dropout, norm_type=norm_type))
+                block_in = block_out
+                if i_level == self.num_resolutions - 1:
+                    attn_block.append(AttnBlock(block_in, norm_type))
+            conv_block.res = res_block
+            conv_block.attn = attn_block
+            # downsample
+            if i_level != 0:
+                conv_block.upsample = Upsample(block_in, resamp_with_conv)
+            self.conv_blocks.append(conv_block)
+
+        # end
+        self.norm_out = Normalize(block_in, norm_type)
+        self.conv_out = nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1)
+
+    @property
+    def last_layer(self):
+        return self.conv_out.weight
+
+    def forward(self, z):
+        # z to block_in
+        h = self.conv_in(z)
+
+        # middle
+        for mid_block in self.mid:
+            h = mid_block(h)
+
+        # upsampling
+        for i_level, block in enumerate(self.conv_blocks):
+            for i_block in range(self.num_res_blocks + 1):
+                h = block.res[i_block](h)
+                if len(block.attn) > 0:
+                    h = block.attn[i_block](h)
+            if i_level != self.num_resolutions - 1:
+                h = block.upsample(h)
+
+        # end
+        h = self.norm_out(h)
+        h = nonlinearity(h)
+        h = self.conv_out(h)
+        return h
+
+
+class VectorQuantizer(nn.Module):
+    def __init__(self, n_e, e_dim, beta, entropy_loss_ratio, l2_norm, show_usage):
+        super().__init__()
+        self.n_e = n_e
+        self.e_dim = e_dim
+        self.beta = beta
+        self.entropy_loss_ratio = entropy_loss_ratio
+        self.l2_norm = l2_norm
+        self.show_usage = show_usage
+
+        self.embedding = nn.Embedding(self.n_e, self.e_dim)
+        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
+        if self.l2_norm:
+            self.embedding.weight.data = F.normalize(self.embedding.weight.data, p=2, dim=-1)
+        if self.show_usage:
+            self.register_buffer("codebook_used", nn.Parameter(torch.zeros(65536)))
+
+
+    def forward(self, z):
+        # reshape z -> (batch, height, width, channel) and flatten
+        z = torch.einsum('b c h w -> b h w c', z).contiguous()
+        z_flattened = z.view(-1, self.e_dim)
+        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
+
+        if self.l2_norm:
+            z = F.normalize(z, p=2, dim=-1)
+            z_flattened = F.normalize(z_flattened, p=2, dim=-1)
+            embedding = F.normalize(self.embedding.weight, p=2, dim=-1)
+        else:
+            embedding = self.embedding.weight
+
+        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
+            torch.sum(embedding**2, dim=1) - 2 * \
+            torch.einsum('bd,dn->bn', z_flattened, torch.einsum('n d -> d n', embedding))
+
+        min_encoding_indices = torch.argmin(d, dim=1)
+        z_q = embedding[min_encoding_indices].view(z.shape)
+        perplexity = None
+        min_encodings = None
+        vq_loss = None
+        commit_loss = None
+        entropy_loss = None
+        codebook_usage = 0
+
+        if self.show_usage and self.training:
+            cur_len = min_encoding_indices.shape[0]
+            self.codebook_used[:-cur_len] = self.codebook_used[cur_len:].clone()
+            self.codebook_used[-cur_len:] = min_encoding_indices
+            codebook_usage = len(torch.unique(self.codebook_used)) / self.n_e
+
+        # compute loss for embedding
+        if self.training:
+            vq_loss = torch.mean((z_q - z.detach()) ** 2)
+            commit_loss = self.beta * torch.mean((z_q.detach() - z) ** 2)
+            entropy_loss = self.entropy_loss_ratio * compute_entropy_loss(-d)
+
+        # preserve gradients
+        z_q = z + (z_q - z).detach()
+
+        # reshape back to match original input shape
+        z_q = torch.einsum('b h w c -> b c h w', z_q)
+
+        return z_q, (vq_loss, commit_loss, entropy_loss, codebook_usage), (perplexity, min_encodings, min_encoding_indices)
+
+    def get_codebook_entry(self, indices, shape=None, channel_first=True):
+        # shape = (batch, channel, height, width) if channel_first else (batch, height, width, channel)
+        if self.l2_norm:
+            embedding = F.normalize(self.embedding.weight, p=2, dim=-1)
+        else:
+            embedding = self.embedding.weight
+        z_q = embedding[indices]  # (b*h*w, c)
+
+        if shape is not None:
+            if channel_first:
+                z_q = z_q.reshape(shape[0], shape[2], shape[3], shape[1])
+                # reshape back to match original input shape
+                z_q = z_q.permute(0, 3, 1, 2).contiguous()
+            else:
+                z_q = z_q.view(shape)
+        return z_q
+
+
+class ResnetBlock(nn.Module):
+    def __init__(self, in_channels, out_channels=None, conv_shortcut=False, dropout=0.0, norm_type='group'):
+        super().__init__()
+        self.in_channels = in_channels
+        out_channels = in_channels if out_channels is None else out_channels
+        self.out_channels = out_channels
+        self.use_conv_shortcut = conv_shortcut
+
+        self.norm1 = Normalize(in_channels, norm_type)
+        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
+        self.norm2 = Normalize(out_channels, norm_type)
+        self.dropout = nn.Dropout(dropout)
+        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
+
+        if self.in_channels != self.out_channels:
+            if self.use_conv_shortcut:
+                self.conv_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
+            else:
+                self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+
+    def forward(self, x):
+        h = x
+        h = self.norm1(h)
+        h = nonlinearity(h)
+        h = self.conv1(h)
+        h = self.norm2(h)
+        h = nonlinearity(h)
+        h = self.dropout(h)
+        h = self.conv2(h)
+
+        if self.in_channels != self.out_channels:
+            if self.use_conv_shortcut:
+                x = self.conv_shortcut(x)
+            else:
+                x = self.nin_shortcut(x)
+        return x+h
+
+
+class AttnBlock(nn.Module):
+    def __init__(self, in_channels, norm_type='group'):
+        super().__init__()
+        self.norm = Normalize(in_channels, norm_type)
+        self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
+        self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
+        self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
+        self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
+
+
+    def forward(self, x):
+        h_ = x
+        h_ = self.norm(h_)
+        q = self.q(h_)
+        k = self.k(h_)
+        v = self.v(h_)
+
+        # compute attention
+        b, c, h, w = q.shape
+        q = rearrange(q, 'b c h w -> b (h w) c')
+        k = rearrange(k, 'b c h w -> b (h w) c')
+        v = rearrange(v, 'b c h w -> b (h w) c')
+
+        # q = q.reshape(b,c,h*w)
+        # q = q.permute(0,2,1)   # b,hw,c
+        # k = k.reshape(b,c,h*w) # b,c,hw
+
+        # w_ = torch.bmm(q,k)    # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
+        # w_ = w_ * (int(c)**(-0.5))
+        # w_ = F.softmax(w_, dim=2)
+
+        # # attend to values
+        # v = v.reshape(b,c,h*w)
+        # w_ = w_.permute(0,2,1)  # b,hw,hw (first hw of k, second of q)
+        # h_ = torch.bmm(v,w_)    # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
+
+        h_ = nn.functional.scaled_dot_product_attention(q, k, v)
+        h_ = rearrange(h_, 'b (h w) c -> b c h w', h=h, w=w)
+        # h_ = h_.reshape(b,c,h,w)
+
+        h_ = self.proj_out(h_)
+
+        return x+h_
+
+
+def nonlinearity(x):
+    # swish
+    return x*torch.sigmoid(x)
+
+
+def Normalize(in_channels, norm_type='group'):
+    assert norm_type in ['group', 'batch']
+    if norm_type == 'group':
+        return nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
+    elif norm_type == 'batch':
+        return nn.SyncBatchNorm(in_channels)
+
+
+class Upsample(nn.Module):
+    def __init__(self, in_channels, with_conv):
+        super().__init__()
+        self.with_conv = with_conv
+        if self.with_conv:
+            self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
+
+    def forward(self, x):
+        x = F.interpolate(x, scale_factor=2.0, mode="nearest")
+        if self.with_conv:
+            x = self.conv(x)
+        return x
+
+
+class Downsample(nn.Module):
+    def __init__(self, in_channels, with_conv):
+        super().__init__()
+        self.with_conv = with_conv
+        if self.with_conv:
+            # no asymmetric padding in torch conv, must do it ourselves
+            self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
+
+    def forward(self, x):
+        if self.with_conv:
+            pad = (0,1,0,1)
+            x = F.pad(x, pad, mode="constant", value=0)
+            x = self.conv(x)
+        else:
+            x = F.avg_pool2d(x, kernel_size=2, stride=2)
+        return x
+
+
+def compute_entropy_loss(affinity, loss_type="softmax", temperature=0.01):
+    flat_affinity = affinity.reshape(-1, affinity.shape[-1])
+    flat_affinity /= temperature
+    probs = F.softmax(flat_affinity, dim=-1)
+    log_probs = F.log_softmax(flat_affinity + 1e-5, dim=-1)
+    if loss_type == "softmax":
+        target_probs = probs
+    else:
+        raise ValueError("Entropy loss {} not supported".format(loss_type))
+    avg_probs = torch.mean(target_probs, dim=0)
+    avg_entropy = - torch.sum(avg_probs * torch.log(avg_probs + 1e-5))
+    sample_entropy = - torch.mean(torch.sum(target_probs * log_probs, dim=-1))
+    loss = sample_entropy - avg_entropy
+    return loss
+
+
+def compute_entropy_loss2(
+    logits,
+    temperature=0.01,
+    sample_minimization_weight=1.0,
+    batch_maximization_weight=1.0,
+    eps=1e-5,
+):
+    """
+    Entropy loss of unnormalized logits
+
+    logits: Affinities are over the last dimension
+
+    https://github.com/google-research/magvit/blob/05e8cfd6559c47955793d70602d62a2f9b0bdef5/videogvt/train_lib/losses.py#L279
+    LANGUAGE MODEL BEATS DIFFUSION — TOKENIZER IS KEY TO VISUAL GENERATION (2024)
+    """
+    probs = F.softmax(logits / temperature, -1)
+    log_probs = F.log_softmax(logits / temperature + eps, -1)
+
+    avg_probs = reduce(probs, "... D -> D", "mean")
+
+    avg_entropy = -torch.sum(avg_probs * torch.log(avg_probs + eps))
+
+    sample_entropy = -torch.sum(probs * log_probs, -1)
+    sample_entropy = torch.mean(sample_entropy)
+
+    loss = (sample_minimization_weight * sample_entropy) - (
+        batch_maximization_weight * avg_entropy
+    )
+
+    return sample_entropy, avg_entropy, loss
+
+
+def VQ_16(**kwargs):
+    return VQModel(ModelArgs(encoder_ch_mult=[1, 1, 2, 2, 4], decoder_ch_mult=[1, 1, 2, 2, 4], **kwargs))
+
+VQ_models = {'VQ-16': VQ_16}
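
For completeness, a round-trip sketch of the tokenizer (not part of the commit): encode a batch of images into codebook indices and decode them back, here with randomly initialized weights; in pipeline.py the weights come from the LlamaGen vq_ds16_c2i checkpoint instead.

import torch
from vq_model import VQ_models

vae = VQ_models['VQ-16']().eval()          # 16x downsampling, 16384-entry codebook
images = torch.randn(2, 3, 256, 256)       # stand-in for a batch of RGB images

with torch.no_grad():
    # encode() returns (quantized latents, loss tuple, info tuple);
    # the last element of the info tuple holds the flattened codebook indices.
    quant, _, (_, _, indices) = vae.encode(images)   # indices: (2*16*16,)
    recon = vae.decode_code(indices, shape=(2, 8, 16, 16))

print(quant.shape, recon.shape)            # (2, 8, 16, 16) (2, 3, 256, 256)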