Files changed (6)
  1. .DS_Store +0 -0
  2. README.md +2 -2
  3. app.py +22 -159
  4. lora_models.json +2 -4
  5. readme.md +2 -2
  6. requirements.txt +0 -3
.DS_Store DELETED
Binary file (6.15 kB)
 
README.md CHANGED
@@ -4,9 +4,9 @@ emoji: 🏆
  colorFrom: blue
  colorTo: purple
  sdk: gradio
- sdk_version: 4.39.0
+ sdk_version: 5.14.0
  app_file: app.py
  pinned: true
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,26 +1,24 @@
- import spaces
- 
  import gradio as gr
  import numpy as np
  import os
+ import spaces
  import random
  import json
+ # from image_gen_aux import DepthPreprocessor
  from PIL import Image
  import torch
  from torchvision import transforms
- import zipfile

  from diffusers import FluxFillPipeline, AutoencoderKL
  from PIL import Image
- # from samgeo.text_sam import LangSAM
+ 

  MAX_SEED = np.iinfo(np.int32).max
  MAX_IMAGE_SIZE = 2048

- # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- # sam = LangSAM(model_type="sam2-hiera-large").to(device)
- 
  pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
+ # pipe.load_lora_weights("Himanshu806/testLora")
+ # pipe.enable_lora()

  with open("lora_models.json", "r") as f:
      lora_models = json.load(f)
@@ -39,68 +37,21 @@ for model_name, model_path in lora_models.items():

  lora_models["None"] = None

- def calculate_optimal_dimensions(image: Image.Image):
-     # Extract the original dimensions
-     original_width, original_height = image.size
- 
-     # Set constants
-     MIN_ASPECT_RATIO = 9 / 16
-     MAX_ASPECT_RATIO = 16 / 9
-     FIXED_DIMENSION = 1024
- 
-     # Calculate the aspect ratio of the original image
-     original_aspect_ratio = original_width / original_height
- 
-     # Determine which dimension to fix
-     if original_aspect_ratio > 1:  # Wider than tall
-         width = FIXED_DIMENSION
-         height = round(FIXED_DIMENSION / original_aspect_ratio)
-     else:  # Taller than wide
-         height = FIXED_DIMENSION
-         width = round(FIXED_DIMENSION * original_aspect_ratio)
- 
-     # Ensure dimensions are multiples of 8
-     width = (width // 8) * 8
-     height = (height // 8) * 8
- 
-     # Enforce aspect ratio limits
-     calculated_aspect_ratio = width / height
-     if calculated_aspect_ratio > MAX_ASPECT_RATIO:
-         width = (height * MAX_ASPECT_RATIO // 8) * 8
-     elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
-         height = (width / MIN_ASPECT_RATIO // 8) * 8
- 
-     # Ensure width and height remain above the minimum dimensions
-     width = max(width, 576) if width == FIXED_DIMENSION else width
-     height = max(height, 576) if height == FIXED_DIMENSION else height
- 
-     return width, height
- 
  @spaces.GPU(durations=300)
- def infer(edit_images, prompt, lora_model, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+ def infer(edit_images, prompt, width, height, lora_model, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
      # pipe.enable_xformers_memory_efficient_attention()
-     gr.Info("Infering")

      if lora_model != "None":
          pipe.load_lora_weights(lora_models[lora_model])
          pipe.enable_lora()

-     gr.Info("starting checks")
- 
      image = edit_images["background"]
+     # width, height = calculate_optimal_dimensions(image)
      mask = edit_images["layers"][0]
- 
-     if not image:
-         gr.Info("Please upload an image.")
-         return None, None
- 
- 
-     width, height = calculate_optimal_dimensions(image)
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)

      # controlImage = processor(image)
-     gr.Info("generating image")
      image = pipe(
          # mask_image_latent=vae.encode(controlImage),
          prompt=prompt,
@@ -110,10 +61,8 @@ def infer(edit_images, prompt, lora_model, strength, seed=42, randomize_seed=Fal
          height=height,
          width=width,
          guidance_scale=guidance_scale,
-         # strength=strength,
          num_inference_steps=num_inference_steps,
          generator=torch.Generator(device='cuda').manual_seed(seed),
-         # generator=torch.Generator().manual_seed(seed),
          # lora_scale=0.75 // not supported in this version
      ).images[0]

@@ -123,56 +72,6 @@ def infer(edit_images, prompt, lora_model, strength, seed=42, randomize_seed=Fal
      return output_image_jpg, seed
      # return image, seed

- def download_image(image):
-     if isinstance(image, np.ndarray):
-         image = Image.fromarray(image)
-     image.save("output.png", "PNG")
-     return "output.png"
- 
- def save_details(result, edit_image, prompt, lora_model, strength, seed, guidance_scale, num_inference_steps):
-     image = edit_image["background"]
-     mask = edit_image["layers"][0]
- 
-     if isinstance(result, np.ndarray):
-         result = Image.fromarray(result)
-     if isinstance(image, np.ndarray):
-         image = Image.fromarray(image)
-     if isinstance(mask, np.ndarray):
-         mask = Image.fromarray(mask)
- 
-     result.save("saved_result.png", "PNG")
-     image.save("saved_image.png", "PNG")
-     mask.save("saved_mask.png", "PNG")
- 
-     details = {
-         "prompt": prompt,
-         "lora_model": lora_model,
-         "strength": strength,
-         "seed": seed,
-         "guidance_scale": guidance_scale,
-         "num_inference_steps": num_inference_steps
-     }
- 
-     with open("details.json", "w") as f:
-         json.dump(details, f)
- 
-     # Create a ZIP file
-     with zipfile.ZipFile("output.zip", "w") as zipf:
-         zipf.write("saved_result.png")
-         zipf.write("saved_image.png")
-         zipf.write("saved_mask.png")
-         zipf.write("details.json")
- 
-     return "output.zip"
- 
- def set_image_as_inpaint(image):
-     return image
- 
- # def generate_mask(image, click_x, click_y):
- #     text_prompt = "face"
- #     mask = sam.predict(image, text_prompt, box_threshold=0.24, text_threshold=0.24)
- #     return mask
- 
  examples = [
      "photography of a young woman, accent lighting, (front view:1.4), "
      # "a tiny astronaut hatching from an egg on the moon",
@@ -253,65 +152,29 @@ with gr.Blocks(css=css) as demo:

          with gr.Row():

-             strength = gr.Slider(
-                 label="Strength",
-                 minimum=0,
-                 maximum=1,
-                 step=0.01,
-                 value=0.85,
+             width = gr.Slider(
+                 label="width",
+                 minimum=512,
+                 maximum=3072,
+                 step=1,
+                 value=1024,
              )

-             # width = gr.Slider(
-             #     label="width",
-             #     minimum=512,
-             #     maximum=3072,
-             #     step=1,
-             #     value=1024,
-             # )
- 
-             # height = gr.Slider(
-             #     label="height",
-             #     minimum=512,
-             #     maximum=3072,
-             #     step=1,
-             #     value=1024,
-             # )
+             height = gr.Slider(
+                 label="height",
+                 minimum=512,
+                 maximum=3072,
+                 step=1,
+                 value=1024,
+             )

      gr.on(
          triggers=[run_button.click, prompt.submit],
          fn = infer,
-         inputs = [edit_image, prompt, lora_model, strength, seed, randomize_seed, guidance_scale, num_inference_steps],
+         inputs = [edit_image, prompt, width, height, lora_model, seed, randomize_seed, guidance_scale, num_inference_steps],
          outputs = [result, seed]
      )

-     download_button = gr.Button("Download Image as PNG")
-     set_inpaint_button = gr.Button("Set Image as Inpaint")
-     save_button = gr.Button("Save Details")
- 
-     download_button.click(
-         fn=download_image,
-         inputs=[result],
-         outputs=gr.File(label="Download Image")
-     )
- 
-     set_inpaint_button.click(
-         fn=set_image_as_inpaint,
-         inputs=[result],
-         outputs=[edit_image]
-     )
- 
-     save_button.click(
-         fn=save_details,
-         inputs=[result, edit_image, prompt, lora_model, strength, seed, guidance_scale, num_inference_steps],
-         outputs=gr.File(label="Download/Save Status")
-     )
- 
-     # edit_image.select(
-     #     fn=generate_mask,
-     #     inputs=[edit_image, gr.Number(), gr.Number()],
-     #     outputs=[edit_image]
-     # )
- 
  # demo.launch()
  PASSWORD = os.getenv("GRADIO_PASSWORD")
  USERNAME = os.getenv("GRADIO_USERNAME")
@@ -324,4 +187,4 @@ def authenticate(username, password):
      return False
  # Launch the app with authentication

- demo.launch(debug=True, auth=authenticate)
+ demo.launch(auth=authenticate)
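
Net effect on app.py: the commit swaps the automatic calculate_optimal_dimensions() sizing for user-set width/height sliders, drops the strength parameter along with the download/save/SAM helpers, and removes debug=True from the launch call. Below is a minimal sketch of the resulting inference path outside Gradio — assuming a CUDA GPU that fits FLUX.1-Fill-dev and hypothetical input.png/mask.png files; the image=/mask_image= argument names follow the standard diffusers FluxFillPipeline API rather than anything shown in this diff.

import torch
from PIL import Image
from diffusers import FluxFillPipeline

# Same checkpoint and dtype as app.py.
pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
).to("cuda")

image = Image.open("input.png").convert("RGB")  # hypothetical source image
mask = Image.open("mask.png").convert("L")      # hypothetical mask; white = repaint

# width/height now come straight from the UI sliders (512-3072, default 1024);
# keeping them multiples of 8 avoids VAE rounding artifacts.
result = pipe(
    prompt="photography of a young woman, accent lighting, (front view:1.4)",
    image=image,
    mask_image=mask,
    width=1024,
    height=1024,
    guidance_scale=3.5,
    num_inference_steps=28,
    generator=torch.Generator(device="cuda").manual_seed(42),
).images[0]

result.convert("RGB").save("output.jpg", quality=85)  # infer() likewise returns a JPEG
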
lora_models.json CHANGED
@@ -1,6 +1,4 @@
  {
-     "RahulFineTuned (qwertyui)": "Himanshu806/testLora",
-     "femaleIndian (indmodelf)": "Himanshu806/ind-f-model",
-     "KodaRealistic (flmft style)": "alvdansen/flux-koda",
-     "superRealism (Super Realism)": "strangerzonehf/Flux-Super-Realism-LoRA"
+     "RahulFineTuned": "Himanshu806/testLora",
+     "KodaRealistic": "alvdansen/flux-koda"
  }
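
The trimmed lora_models.json keeps only the two adapters the UI still exposes, with the trigger-word hints dropped from the display names. For reference, a sketch of how app.py consumes this file, reusing the pipe object from the sketch above; the disable_lora() branch is an assumption, since app.py simply skips loading when "None" is selected.

import json

with open("lora_models.json", "r") as f:
    lora_models = json.load(f)
lora_models["None"] = None  # sentinel entry app.py adds for the base model

selected = "KodaRealistic"  # hypothetical dropdown choice
if selected != "None":
    pipe.load_lora_weights(lora_models[selected])  # e.g. "alvdansen/flux-koda"
    pipe.enable_lora()
else:
    pipe.disable_lora()  # assumption: not in app.py, which just skips loading
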
readme.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Inpainting Test UI
+ title: Inpainting
  emoji: 🏆
  colorFrom: blue
  colorTo: purple
  sdk: gradio
- sdk_version: 5.14.0
+ sdk_version: 4.39.0
  app_file: app.py
  pinned: true
  ---
requirements.txt CHANGED
@@ -8,6 +8,3 @@ peft
  xformers
  torchvision
  torch
- opencv-python
- segment-geospatial
- groundingdino-py