Spaces:
Runtime error
added signature based check
check_app.py  CHANGED  +14 -7
@@ -46,7 +46,14 @@ def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, s
         progress(cur_prg, desc=f"Step {step_index}/{num_steps}")
         return callback_kwargs
     print(f"START GENR ")
-
+    # Get the signature of the pipe
+    pipe_signature = signature(pipe)
+
+    # Check for the presence of "guidance_scale" and "callback_on_step_end" in the signature
+    has_guidance_scale = "guidance_scale" in pipe_signature.parameters
+    has_callback_on_step_end = "callback_on_step_end" in pipe_signature.parameters
+
+    if has_guidance_scale and has_callback_on_step_end:
         print("has callback_on_step_end and has guidance_scale")
         image = pipe(
             prompt,
@@ -55,7 +62,7 @@ def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, s
             guidance_scale=guidance_scale,
             callback_on_step_end=callback,
         ).images[0]
-    elif not
+    elif not has_callback_on_step_end and has_guidance_scale:
         print("NO callback_on_step_end and has guidance_scale")
         image = pipe(
             prompt,
@@ -63,15 +70,15 @@ def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, s
             guidance_scale=guidance_scale,
             generator=generator,
         ).images[0]
-    elif
-        print("
+    elif has_callback_on_step_end and not has_guidance_scale:
+        print("has callback_on_step_end and NO guidance_scale")
         image = pipe(
             prompt,
             num_inference_steps=num_steps,
             generator=generator,
-            callback_on_step_end=callback
+            callback_on_step_end=callback,
         ).images[0]
-    elif not
+    elif not has_callback_on_step_end and not has_guidance_scale:
         print("NO callback_on_step_end and NO guidance_scale")
         image = pipe(
             prompt,
@@ -102,7 +109,7 @@ def create_pipeline_logic(prompt_text, model_name):
         #cache_dir=config["cache_dir"],
         torch_dtype=torch.bfloat16
     ).to("cuda")
-
+
     image = generate_image_with_progress(
         pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, seed=seed, progress=progress
     )
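For reference, a minimal sketch of the signature-based check this commit introduces. It assumes signature is imported from Python's standard inspect module and that the pipeline object is callable, so inspect.signature(pipe) resolves to the parameters of its __call__. The helper name and arguments below are illustrative, not the Space's actual code.

from inspect import signature

def call_pipe_with_supported_kwargs(pipe, prompt, num_steps,
                                     guidance_scale=None, callback=None,
                                     generator=None):
    # inspect.signature() on a callable object returns the signature of its
    # __call__, so this works for diffusers pipelines as well as plain functions.
    params = signature(pipe).parameters

    # Build the kwargs dict from only those arguments the pipeline accepts.
    kwargs = {"num_inference_steps": num_steps, "generator": generator}
    if guidance_scale is not None and "guidance_scale" in params:
        kwargs["guidance_scale"] = guidance_scale
    if callback is not None and "callback_on_step_end" in params:
        kwargs["callback_on_step_end"] = callback

    return pipe(prompt, **kwargs).images[0]

Collapsing the four-way if/elif into a single kwargs dict keeps the parameter check in one place; the diff above keeps the explicit branches, which makes the printed debug messages easier to follow.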