Himanshu-AT committed on
Commit
c1581f5
·
1 Parent(s): 4e648a1

second prompt

Browse files
Files changed (1) hide show
  1. app.py +14 -6
app.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
3
 
4
  import spaces
5
  import random
6
- from image_gen_aux import DepthPreprocessor
7
  from PIL import Image
8
  import torch
9
  from torchvision import transforms
@@ -19,8 +19,8 @@ pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", tor
19
  pipe.load_lora_weights("alvdansen/flux-koda")
20
  pipe.enable_lora()
21
 
22
- vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae")
23
- processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
24
 
25
  preprocess = transforms.Compose(
26
  [
@@ -80,7 +80,7 @@ def calculate_optimal_dimensions(image: Image.Image):
80
  return width, height
81
 
82
  @spaces.GPU(durations=300)
83
- def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
84
  # pipe.enable_xformers_memory_efficient_attention()
85
 
86
  image = edit_images["background"]
@@ -89,10 +89,11 @@ def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height
89
  if randomize_seed:
90
  seed = random.randint(0, MAX_SEED)
91
 
92
- controlImage = processor(image)
93
  image = pipe(
94
- mask_image_latent=vae.encode(controlImage),
95
  prompt=prompt,
 
96
  image=image,
97
  mask_image=mask,
98
  height=height,
@@ -146,6 +147,13 @@ with gr.Blocks(css=css) as demo:
146
  placeholder="Enter your prompt",
147
  container=False,
148
  )
 
 
 
 
 
 
 
149
  run_button = gr.Button("Run")
150
 
151
  result = gr.Image(label="Result", show_label=False)
 
3
 
4
  import spaces
5
  import random
6
+ # from image_gen_aux import DepthPreprocessor
7
  from PIL import Image
8
  import torch
9
  from torchvision import transforms
 
19
  pipe.load_lora_weights("alvdansen/flux-koda")
20
  pipe.enable_lora()
21
 
22
+ # vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae")
23
+ # processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
24
 
25
  preprocess = transforms.Compose(
26
  [
 
80
  return width, height
81
 
82
  @spaces.GPU(durations=300)
83
+ def infer(edit_images, prompt, prompt_2, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
84
  # pipe.enable_xformers_memory_efficient_attention()
85
 
86
  image = edit_images["background"]
 
89
  if randomize_seed:
90
  seed = random.randint(0, MAX_SEED)
91
 
92
+ # controlImage = processor(image)
93
  image = pipe(
94
+ # mask_image_latent=vae.encode(controlImage),
95
  prompt=prompt,
96
+ prompt_2=prompt_2,
97
  image=image,
98
  mask_image=mask,
99
  height=height,
 
147
  placeholder="Enter your prompt",
148
  container=False,
149
  )
150
+ prompt_2 = gr.Text(
151
+ label="Prompt2",
152
+ show_label=False,
153
+ max_lines=2,
154
+ placeholder="Enter your second prompt",
155
+ container=False,
156
+ )
157
  run_button = gr.Button("Run")
158
 
159
  result = gr.Image(label="Result", show_label=False)