Upload folder using huggingface_hub
model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12c37d621fc5de4ec16be3e5715a73b0e0121e3ca6289942b9bdad35a869ef97
+size 144135290
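`model.pth` is tracked with Git LFS, so the diff above shows only the pointer file: the LFS spec version, the SHA-256 of the actual object, and its size (about 144 MB). A minimal loading sketch, assuming the checkpoint is a plain `state_dict` for the `DPM` class added in `model.py` below, and that the config values match the trained model (both are assumptions; adjust to the real training setup):

import torch
from model import DPM  # the subclass defined in model.py below

# Hypothetical config values; they must match the checkpoint's training config.
model = DPM(sample_size=64, block_out_channels=(128, 256, 512, 512))
model.load_state_dict(torch.load("model.pth", map_location="cpu"))
model.eval()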
model.py
ADDED
@@ -0,0 +1,159 @@
+from diffusers import UNet2DModel
+# UNet2DOutput is needed for the return type below; in older diffusers it lives at diffusers.models.unet_2d
+from diffusers.models.unets.unet_2d import UNet2DOutput
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from typing import Optional, Tuple, Union
+from collections import OrderedDict
+from dataclasses import dataclass
+from datasets import load_dataset
+import matplotlib.pyplot as plt
+from torchvision import transforms
+from functools import partial
+from torch.utils.data import DataLoader
+from PIL import Image
+from diffusers import DDPMScheduler
+from accelerate import Accelerator
+from diffusers import DDPMPipeline
+import os
+from huggingface_hub import create_repo, upload_folder
+
+
+class DPM(UNet2DModel):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # Create the bottleneck cross-attention here (adjust to your architecture).
+        self.bottleneck_attn = nn.MultiheadAttention(
+            embed_dim=self.config.block_out_channels[-1],
+            num_heads=8,  # adjust as needed
+            batch_first=True
+        )
+
+    def forward(
+        self,
+        sample: torch.Tensor,
+        timestep: Union[torch.Tensor, float, int],
+        class_labels: Optional[torch.Tensor] = None,
+        return_dict: bool = True,
+        prototype: Optional[torch.Tensor] = None,  # <--- added here
+    ) -> Union[UNet2DOutput, Tuple]:
+        r"""
+        The [`UNet2DModel`] forward method.
+
+        Args:
+            sample (`torch.Tensor`):
+                The noisy input tensor with the following shape `(batch, channel, height, width)`.
+            timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
+            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
+                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~models.unets.unet_2d.UNet2DOutput`] instead of a plain tuple.
+            prototype (`torch.Tensor`, *optional*, defaults to `None`):
+                Prototype tokens of shape `(batch, num_tokens, channels)` used as key/value in the bottleneck cross-attention; required by this subclass.
+
+        Returns:
+            [`~models.unets.unet_2d.UNet2DOutput`] or `tuple`:
+                If `return_dict` is True, an [`~models.unets.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is
+                returned where the first element is the sample tensor.
+        """
+        # 0. center input if necessary
+        if self.config.center_input_sample:
+            sample = 2 * sample - 1.0
+
+        # 1. time
+        timesteps = timestep
+        if not torch.is_tensor(timesteps):
+            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
+        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
+            timesteps = timesteps[None].to(sample.device)
+
+        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+        timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)
+
+        t_emb = self.time_proj(timesteps)
+
+        # timesteps does not contain any weights and will always return f32 tensors
+        # but time_embedding might actually be running in fp16. so we need to cast here.
+        # there might be better ways to encapsulate this.
+        t_emb = t_emb.to(dtype=self.dtype)
+        emb = self.time_embedding(t_emb)
+
+        if self.class_embedding is not None:
+            if class_labels is None:
+                raise ValueError("class_labels should be provided when doing class conditioning")
+
+            if self.config.class_embed_type == "timestep":
+                class_labels = self.time_proj(class_labels)
+
+            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
+            emb = emb + class_emb
+        elif self.class_embedding is None and class_labels is not None:
+            raise ValueError("class_embedding needs to be initialized in order to use class conditioning")
+
+        # 2. pre-process
+        skip_sample = sample
+        sample = self.conv_in(sample)
+
+        # 3. down
+        down_block_res_samples = (sample,)
+        for downsample_block in self.down_blocks:
+            if hasattr(downsample_block, "skip_conv"):
+                sample, res_samples, skip_sample = downsample_block(
+                    hidden_states=sample, temb=emb, skip_sample=skip_sample
+                )
+            else:
+                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+
+            down_block_res_samples += res_samples
+
+        # ----------- Cross-attention after downsampling ------------------
+        if prototype is None:
+            raise ValueError("You must provide a `prototype` tensor for cross-attention")
+
+        b, c, h, w = sample.shape
+        query = sample.view(b, c, h * w).transpose(1, 2)  # (B, HW, C)
+
+        # prototype: expected shape (B, N, C)
+        key = value = prototype.to(dtype=sample.dtype)
+
+        attn_output, _ = self.bottleneck_attn(query, key, value)
+        attn_output = attn_output.transpose(1, 2).view(b, c, h, w)  # (B, C, H, W)
+
+        # Residual connection
+        sample = sample + attn_output
+        # ---------------------------------------------------------------
+
+        # 4. mid
+        if self.mid_block is not None:
+            sample = self.mid_block(sample, emb)
+
+        # 5. up
+        skip_sample = None
+        for upsample_block in self.up_blocks:
+            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
+            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
+
+            if hasattr(upsample_block, "skip_conv"):
+                sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
+            else:
+                sample = upsample_block(sample, res_samples, emb)
+
+        # 6. post-process
+        sample = self.conv_norm_out(sample)
+        sample = self.conv_act(sample)
+        sample = self.conv_out(sample)
+
+        if skip_sample is not None:
+            sample += skip_sample
+
+        if self.config.time_embedding_type == "fourier":
+            timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
+            sample = sample / timesteps
+
+        if not return_dict:
+            return (sample,)
+
+        return UNet2DOutput(sample=sample)
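For reference, a minimal usage sketch of the modified forward pass. The config and shapes below are illustrative assumptions; the one hard constraint is that the prototype's last dimension must equal `block_out_channels[-1]`, since the prototype is used as key/value against the downsampled feature map:

import torch

# Illustrative config: four resolution levels, bottleneck width 512.
model = DPM(
    sample_size=64,
    in_channels=3,
    out_channels=3,
    block_out_channels=(128, 256, 512, 512),
)

noisy = torch.randn(2, 3, 64, 64)  # (B, C, H, W) noisy input
t = torch.randint(0, 1000, (2,))   # one diffusion timestep per sample
proto = torch.randn(2, 16, 512)    # (B, N, C) with C = block_out_channels[-1]

out = model(noisy, t, prototype=proto)
print(out.sample.shape)            # torch.Size([2, 3, 64, 64])

Note that the conditioning is applied as a residual update (`sample + attn_output`) at the lowest resolution, and that `forward` raises a `ValueError` when `prototype` is omitted, so this subclass cannot serve as a drop-in UNet for a stock `DDPMPipeline` without also routing the prototype through.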