Double generation time due to low demand
app.py CHANGED
@@ -86,7 +86,7 @@ def detect(image):
     # ddim inversion
     img = transform_img(image).unsqueeze(0).to(pipe.unet.dtype).to(pipe.device)
     image_latents = pipe.vae.encode(img).latent_dist.mode() * 0.13025
-    inverted_latents = pipe(prompt="", latents=image_latents, guidance_scale=1, num_inference_steps=25, output_type="latent")
+    inverted_latents = pipe(prompt="", latents=image_latents, guidance_scale=1, num_inference_steps=50, output_type="latent")
     inverted_latents = inverted_latents.images
 
     # calculate p-value instead of detection threshold. more rigorous, plus we can do a non-boolean output
@@ -109,7 +109,7 @@ def detect(image):
     return max(0.0, 1-1/math.log(5/p_value,10))
 
 def generate(prompt):
-    return pipe(prompt=prompt, negative_prompt="monochrome", num_inference_steps=25, latents=get_noise()).images[0]
+    return pipe(prompt=prompt, negative_prompt="monochrome", num_inference_steps=50, latents=get_noise()).images[0]
 
 # optimize for speed
 pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
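Both edited calls feed latents straight into the pipeline: detect() inverts the uploaded image back toward its initial noise, and generate() starts from the noise returned by get_noise(). A common way to make the pipeline call in detect() behave as a DDIM inversion is to install DDIMInverseScheduler before calling it. The sketch below shows that pattern only under stated assumptions: the Space's actual pipeline construction is not part of this diff, the model id is a placeholder, and an SDXL pipeline is used here only because the 0.13025 factor in the context lines matches the SDXL VAE scaling factor.

# Minimal sketch, not the Space's code: wiring a diffusers pipeline so that the
# pipe(..., latents=image_latents, output_type="latent") call above runs a DDIM
# inversion. Model id is a placeholder; only num_inference_steps comes from the diff.
import torch
from diffusers import StableDiffusionXLPipeline, DDIMScheduler, DDIMInverseScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder model id
    torch_dtype=torch.float16,
).to("cuda")

forward_scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

def ddim_invert(image_latents, steps=50):
    # With the inverse scheduler installed, the usual sampling loop walks the
    # timesteps in reverse, mapping VAE latents back toward the initial noise.
    # guidance_scale=1 disables classifier-free guidance during inversion.
    pipe.scheduler = inverse_scheduler
    out = pipe(prompt="", latents=image_latents, guidance_scale=1,
               num_inference_steps=steps, output_type="latent")
    pipe.scheduler = forward_scheduler
    return out.images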
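The commit itself only raises num_inference_steps to 50 in both places; since sampling cost scales roughly linearly with the step count, that is what doubles generation (and detection) time on the Space's A10G. A hypothetical helper like the one below, which is not part of app.py, can confirm the factor; note that the torch.compile'd UNet needs a warm-up call before the timed run.

import time
import torch

def time_call(fn, *args, warmup=1, **kwargs):
    # Hypothetical benchmarking helper: run fn a few warm-up passes (so that
    # torch.compile and CUDA autotuning settle), then time a single call.
    for _ in range(warmup):
        fn(*args, **kwargs)
    torch.cuda.synchronize()
    start = time.perf_counter()
    fn(*args, **kwargs)
    torch.cuda.synchronize()
    return time.perf_counter() - start

# e.g. time_call(generate, "an astronaut riding a horse") before and after this
# commit should differ by roughly a factor of two on the same GPU.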