Update handler.py
Browse files- handler.py +7 -7
handler.py
CHANGED
|
@@ -96,7 +96,7 @@ class EndpointHandler():
  96          self.pipe.enable_xformers_memory_efficient_attention()
  97
  98          # run inference pipeline
  99 -        out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image)
 100
 101          print("1st pipeline part successful!")
 102
|
@@ -111,9 +111,9 @@ class EndpointHandler():
 111              negative_prompt=negative_prompt,
 112              image=image,
 113              mask_image=mask_image,
 114 -            guidance_scale=
 115 -            num_inference_steps=
 116 -            strength=
 117              output_type="latent", # let's keep in latent to save some VRAM
 118          ).images[0]
 119
@@ -124,9 +124,9 @@ class EndpointHandler():
 124          image2 = self.pipe3(
 125              prompt=prompt,
 126              image=image,
 127 -            guidance_scale=
 128 -            num_inference_steps=
 129 -            strength=
 130          ).images[0]
 131
 132          print("3rd pipeline part successful!")
|
|
|
  96          self.pipe.enable_xformers_memory_efficient_attention()
  97
  98          # run inference pipeline
  99 +        out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, strength=0.99)
 100
 101          print("1st pipeline part successful!")
 102
|
 111              negative_prompt=negative_prompt,
 112              image=image,
 113              mask_image=mask_image,
 114 +            guidance_scale=guidance_scale, #8.0
 115 +            num_inference_steps=num_inference_steps, #100
 116 +            strength=strength, #0.2
 117              output_type="latent", # let's keep in latent to save some VRAM
 118          ).images[0]
 119
|
|
 124          image2 = self.pipe3(
 125              prompt=prompt,
 126              image=image,
 127 +            guidance_scale=guidance_scale, #8.0
 128 +            num_inference_steps=num_inference_steps, #100
 129 +            strength=strength, #0.2
 130          ).images[0]
 131
 132          print("3rd pipeline part successful!")