IbarakiDouji committed on
Commit
66aee95
·
1 Parent(s): 0a5444f

fix: keep vae

Browse files
Files changed (1) hide show
  1. app.py +6 -1
app.py CHANGED
@@ -239,7 +239,12 @@ def generate(
239
  # Model initialization
240
  if torch.cuda.is_available():
241
  try:
242
- pipe = utils.load_pipeline(MODEL, device)
 
 
 
 
 
243
  logger.info("Pipeline loaded successfully on GPU!")
244
  except Exception as e:
245
  logger.error(f"Error loading VAE, falling back to default: {e}")
 
239
  # Model initialization
240
  if torch.cuda.is_available():
241
  try:
242
+ logger.info("Loading VAE and pipeline...")
243
+ vae = AutoencoderKL.from_pretrained(
244
+ "madebyollin/sdxl-vae-fp16-fix",
245
+ torch_dtype=torch.float16,
246
+ )
247
+ pipe = utils.load_pipeline(MODEL, device, vae=vae)
248
  logger.info("Pipeline loaded successfully on GPU!")
249
  except Exception as e:
250
  logger.error(f"Error loading VAE, falling back to default: {e}")