Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -84,15 +84,15 @@ os.environ["SAFETENSORS_FAST_GPU"] = "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder'
+text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder')#.to(device=device, dtype=torch.bfloat16)
 text_encoder_2 = CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
 def load_and_prepare_model():
     proc=Attention(query_dim=4, upcast_attention=True, upcast_softmax = True, processor = AttnProcessor2_0)
-    tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer',
-    tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer_2',
-    scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='scheduler'
-    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32
-    unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated
+    tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer', use_fast=True)
+    tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer_2', use_fast=True)
+    scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='scheduler')
+    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
+    unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated')
     pipe = StableDiffusionXLPipeline.from_pretrained(
         'ford442/RealVisXL_V5.0_BF16',
         #torch_dtype=torch.bfloat16,
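In short: each removed line was missing its closing parenthesis, which makes app.py fail to parse with a SyntaxError; the added lines close every from_pretrained call and pass use_fast=True to both tokenizers. The sketch below is only an illustration of how these components could then be handed to the pipeline. It is not the Space's actual code: the real pipe = StableDiffusionXLPipeline.from_pretrained(...) call is truncated in the hunk, so the keyword arguments forwarded here (and the omission of the text encoders and flags like low_cpu_mem_usage=False) are assumptions layered on the standard diffusers component-override API.

# Minimal sketch (assumptions noted inline); component names mirror the diff above.
import torch
from diffusers import (AutoencoderKL, EulerAncestralDiscreteScheduler,
                       StableDiffusionXLPipeline, UNet2DConditionModel)
from transformers import CLIPTokenizer

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_and_prepare_model():
    # Each call now has its closing parenthesis, so the module parses cleanly.
    tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='tokenizer', use_fast=True)
    tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='tokenizer_2', use_fast=True)
    scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.float32)
    unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", subfolder='unet')
    # Assumption: the pre-loaded objects are forwarded as keyword arguments so
    # from_pretrained uses them instead of loading the repo's defaults again.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16',
        vae=vaeXL,
        unet=unet,
        scheduler=scheduler,
        tokenizer=tokenizer_1,
        tokenizer_2=tokenizer_2,
    )
    return pipe.to(device)

pipe = load_and_prepare_model()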