Spaces: Running on Zero
Optimize the GPU use #3
by Fabrice-TIERCELIN - opened

app.py CHANGED
@@ -88,7 +88,6 @@ def check(
 ):
     raise gr.Error("At least one border must be enlarged.")
 
-@spaces.GPU
 def uncrop(
     input_image,
     enlarge_top,
@@ -244,21 +243,20 @@ def uncrop(
 
     progress(None, desc = "Processing...")
 
-    output_image = pipe(
-        seeds = [seed],
-        width = process_width,
-        height = process_height,
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        image = enlarged_image,
-        mask_image = mask_image,
-        num_inference_steps = num_inference_steps,
-        guidance_scale = guidance_scale,
-        image_guidance_scale = image_guidance_scale,
-        strength = strength,
-        denoising_steps = denoising_steps,
-        show_progress_bar = True
-    ).images[0]
+    output_image = uncrop_on_gpu(
+        seed,
+        process_width,
+        process_height,
+        prompt,
+        negative_prompt,
+        enlarged_image,
+        mask_image,
+        num_inference_steps,
+        guidance_scale,
+        image_guidance_scale,
+        strength,
+        denoising_steps
+    )
 
     if limitation != "":
         output_image = output_image.resize((output_width, output_height))
@@ -282,6 +280,37 @@ def uncrop(
         mask_image
     ]
 
+@spaces.GPU(duration=120)
+def uncrop_on_gpu(
+    seed,
+    process_width,
+    process_height,
+    prompt,
+    negative_prompt,
+    enlarged_image,
+    mask_image,
+    num_inference_steps,
+    guidance_scale,
+    image_guidance_scale,
+    strength,
+    denoising_steps
+):
+    return pipe(
+        seeds = [seed],
+        width = process_width,
+        height = process_height,
+        prompt = prompt,
+        negative_prompt = negative_prompt,
+        image = enlarged_image,
+        mask_image = mask_image,
+        num_inference_steps = num_inference_steps,
+        guidance_scale = guidance_scale,
+        image_guidance_scale = image_guidance_scale,
+        strength = strength,
+        denoising_steps = denoising_steps,
+        show_progress_bar = True
+    ).images[0]
+
 with gr.Blocks() as interface:
     gr.HTML(
         DESCRIPTION
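For reference, the change splits the work so that uncrop keeps the CPU-side preparation while the pipeline call moves into uncrop_on_gpu, decorated with @spaces.GPU(duration=120); under ZeroGPU a GPU is then attached only while inference runs, with an explicit 120-second budget, instead of for the whole uncrop call. Below is a minimal, self-contained sketch of that pattern, assuming a diffusers inpainting pipeline; the model id, pipeline class, and function names are illustrative and not taken from this Space's app.py.

import spaces
import torch
from diffusers import StableDiffusionInpaintPipeline

# Loaded once at startup; ZeroGPU attaches a GPU only while a decorated
# function is running. Model id and pipeline class are placeholders.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting",
    torch_dtype = torch.float16,
).to("cuda")

@spaces.GPU(duration=120)  # request a GPU for at most ~120 s per call
def inpaint_on_gpu(prompt, image, mask_image, num_inference_steps = 25):
    # GPU-bound work only: run the pipeline and return the first image.
    return pipe(
        prompt = prompt,
        image = image,
        mask_image = mask_image,
        num_inference_steps = num_inference_steps
    ).images[0]

def inpaint(prompt, image, mask_image):
    # CPU-side pre- and post-processing stays outside the decorated
    # function, so it consumes no GPU time.
    output = inpaint_on_gpu(prompt, image, mask_image)
    return output.resize(image.size)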