1inkusFace committed on
Commit
3e16887
·
verified ·
1 Parent(s): ff799c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -15,7 +15,7 @@ import numpy as np
15
  from PIL import Image
16
  import torch
17
  #import diffusers
18
- from diffusers import AutoencoderKL, StableDiffusionXLPipeline
19
  from diffusers import EulerAncestralDiscreteScheduler
20
  from typing import Tuple
21
  import paramiko
@@ -86,6 +86,7 @@ os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
86
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
87
 
88
  def load_and_prepare_model():
 
89
  #vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=True, token=True)
90
  #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
91
  vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
@@ -100,6 +101,7 @@ def load_and_prepare_model():
100
  add_watermarker=False,
101
  #text_encoder=None,
102
  #text_encoder_2=None,
 
103
  vae=None,
104
  )
105
  #pipe.vae = vaeXL #.to(torch.bfloat16)
@@ -122,8 +124,8 @@ def load_and_prepare_model():
122
 
123
  pipe = load_and_prepare_model()
124
 
125
- text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True)#.to(device=device, dtype=torch.bfloat16)
126
- text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
127
 
128
  MAX_SEED = np.iinfo(np.int32).max
129
 
 
15
  from PIL import Image
16
  import torch
17
  #import diffusers
18
+ from diffusers import AutoencoderKL, StableDiffusionXLPipeline, UNet2DConditionModel
19
  from diffusers import EulerAncestralDiscreteScheduler
20
  from typing import Tuple
21
  import paramiko
 
86
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
87
 
88
  def load_and_prepare_model():
89
+ unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated-text-image', token=True)
90
  #vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=True, token=True)
91
  #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
92
  vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
 
101
  add_watermarker=False,
102
  #text_encoder=None,
103
  #text_encoder_2=None,
104
+ unet=unet,
105
  vae=None,
106
  )
107
  #pipe.vae = vaeXL #.to(torch.bfloat16)
 
124
 
125
  pipe = load_and_prepare_model()
126
 
127
+ text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder',token=True)#.to(device=device, dtype=torch.bfloat16)
128
+ text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
129
 
130
  MAX_SEED = np.iinfo(np.int32).max
131