ovi054 committed on
Commit
154976a
·
verified ·
1 Parent(s): fa751aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -4
app.py CHANGED
@@ -5,7 +5,6 @@ import spaces
5
  import torch
6
  from diffusers import QwenImagePipeline
7
 
8
-
9
  dtype = torch.bfloat16
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
@@ -21,8 +20,6 @@ MAX_IMAGE_SIZE = 2048
21
 
22
  # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
23
 
24
-
25
-
26
  @spaces.GPU()
27
  def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=4, num_inference_steps=28, lora_id=None, lora_scale=0.95, progress=gr.Progress(track_tqdm=True)):
28
  if randomize_seed:
@@ -32,7 +29,7 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
32
 
33
  if lora_id and lora_id.strip() != "":
34
  pipe.unload_lora_weights()
35
- pipe.load_lora_weights(lora_id)
36
 
37
 
38
  try:
 
5
  import torch
6
  from diffusers import QwenImagePipeline
7
 
 
8
  dtype = torch.bfloat16
9
  device = "cuda" if torch.cuda.is_available() else "cpu"
10
 
 
20
 
21
  # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
22
 
 
 
23
  @spaces.GPU()
24
  def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=4, num_inference_steps=28, lora_id=None, lora_scale=0.95, progress=gr.Progress(track_tqdm=True)):
25
  if randomize_seed:
 
29
 
30
  if lora_id and lora_id.strip() != "":
31
  pipe.unload_lora_weights()
32
+ pipe.load_lora_weights(lora_id.strip())
33
 
34
 
35
  try: