Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -89,7 +89,7 @@ tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_c
 tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer_2', token=True)
 scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='scheduler', token=True)
 vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
-unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', token=True)
+unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated-text-image', token=True)
 
 def load_and_prepare_model():
     pipe = StableDiffusionXLPipeline.from_pretrained(
@@ -102,11 +102,13 @@ def load_and_prepare_model():
         tokenizer=None,
         tokenizer_2=None,
         scheduler=None,
+        unet=None,
         vae=None,
     )
     pipe.scheduler=scheduler
     pipe.tokenizer=tokenizer_1
     pipe.tokenizer_2=tokenizer_2
+    pipe.unet=unet
     #pipe.vae.do_resize=False
     #pipe.vae.vae_scale_factor=8
     #pipe.to(device)
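The commit does two things: it overrides two UNet config fields (upcast_attention and attention_type) at load time, and it stops the pipeline from loading its own UNet (unet=None) so the separately loaded one can be attached afterwards. Below is a minimal, self-contained sketch of that pattern, not a drop-in replacement for app.py: the tokenizers, VAE, and token=True handling from the diff are omitted for brevity, and whether the 'gated-text-image' override loads cleanly against a checkpoint without GLIGEN weights depends on the installed diffusers version.

import torch
from diffusers import (
    StableDiffusionXLPipeline,
    UNet2DConditionModel,
    EulerAncestralDiscreteScheduler,
)

# Config kwargs passed to from_pretrained override the values stored in the
# checkpoint's config.json before the weights are loaded.
unet = UNet2DConditionModel.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16",
    subfolder="unet",
    low_cpu_mem_usage=False,
    upcast_attention=True,              # compute attention in float32
    attention_type="gated-text-image",  # GLIGEN-style gated attention blocks
)

scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16", subfolder="scheduler"
)

# Passing None for a component keeps from_pretrained from loading its own
# copy; diffusers warns and registers None until a component is assigned.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16",
    scheduler=None,
    unet=None,
)
pipe.scheduler = scheduler
pipe.unet = unet

Loading the UNet outside the pipeline constructor and assigning it afterwards keeps the custom config overrides in one place and avoids downloading and instantiating the checkpoint's default UNet only to throw it away.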