Update app.py

app.py CHANGED
@@ -18,7 +18,7 @@ import paramiko
 import datetime
 #import diffusers
 from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, EulerAncestralDiscreteScheduler
-from diffusers.models.attention_processor import AttnProcessor2_0
+from diffusers.models.attention_processor import Attention, AttnProcessor2_0
 from transformers import CLIPTextModelWithProjection, CLIPTextModel, CLIPTokenizer
 
 torch.backends.cuda.matmul.allow_tf32 = False
@@ -85,8 +85,12 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
 text_encoder_2 = CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
-
 def load_and_prepare_model():
+    proc=Attention(
+        upcast_attention=True,
+        # upcast_softmax = True,
+        processor = AttnProcessor2_0
+    )
     tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer', token=True)
     tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer_2', token=True)
     scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='scheduler', token=True)
@@ -119,7 +123,7 @@ def load_and_prepare_model():
     #pipe.unet.to(memory_format=torch.channels_last)
     pipe.to(device=device, dtype=torch.bfloat16)
     pipe.vae = vaeXL.to(device) #.to('cpu') #.to(torch.bfloat16)
-    pipe.unet.set_attn_processor(
+    pipe.unet.set_attn_processor(proc)
     pipe.vae.set_default_attn_processor()
     return pipe
 
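For context: the commit swaps the plain AttnProcessor2_0 import for Attention plus AttnProcessor2_0, builds a proc object inside load_and_prepare_model(), and hands it to pipe.unet.set_attn_processor(). Below is a minimal sketch of the more common way to enable the PyTorch 2.0 scaled-dot-product-attention processor on an SDXL pipeline; it assumes the public diffusers API, and the model id and bfloat16 dtype are simply carried over from the diff rather than verified here.

import torch
from diffusers import StableDiffusionXLPipeline
from diffusers.models.attention_processor import AttnProcessor2_0

# Model id taken from the diff above (assumed to load as a standard SDXL checkpoint).
pipe = StableDiffusionXLPipeline.from_pretrained(
    'ford442/RealVisXL_V5.0_BF16',
    torch_dtype=torch.bfloat16,
)

# set_attn_processor() takes a processor instance; AttnProcessor2_0 dispatches to
# torch.nn.functional.scaled_dot_product_attention.
pipe.unet.set_attn_processor(AttnProcessor2_0())

# Reset the VAE to its default attention processor, mirroring the call in the diff.
pipe.vae.set_default_attn_processor()

Passing a processor instance directly is the pattern shown in the diffusers documentation; whether handing set_attn_processor() a bare Attention module, as this commit does, behaves the same way is not verified here.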