Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -21,7 +21,7 @@ from insightface.app import FaceAnalysis
 from style_template import styles
 from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps
 
-
+from controlnet_aux import OpenposeDetector
 
 import gradio as gr
 
@@ -58,7 +58,7 @@ app = FaceAnalysis(
 )
 app.prepare(ctx_id=0, det_size=(640, 640))
 
-
+openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
 
 depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()
 
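For context, a minimal sketch of how a controlnet_aux OpenposeDetector is typically called to produce a pose conditioning image; "person.jpg" is a placeholder input, not a file from this Space:

# Minimal sketch of the controlnet_aux OpenposeDetector API (assumption:
# "person.jpg" is a placeholder input image, not part of this repo).
from PIL import Image
from controlnet_aux import OpenposeDetector

openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

img = Image.open("person.jpg").convert("RGB")
pose_image = openpose(img)  # PIL image with the detected body skeleton drawn
pose_image.save("pose.png")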
@@ -86,13 +86,13 @@ controlnet_identitynet = ControlNetModel.from_pretrained(
 )
 
 # controlnet-pose/canny/depth
-
+controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
 controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
 controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
 
-
-
-
+controlnet_pose = ControlNetModel.from_pretrained(
+    controlnet_pose_model, torch_dtype=dtype
+).to(device)
 controlnet_canny = ControlNetModel.from_pretrained(
     controlnet_canny_model, torch_dtype=dtype
 ).to(device)
@@ -127,12 +127,12 @@ def get_canny_image(image, t1=100, t2=200):
     return Image.fromarray(edges, "L")
 
 controlnet_map = {
-
+    "pose": controlnet_pose,
     "canny": controlnet_canny,
     "depth": controlnet_depth,
 }
 controlnet_map_fn = {
-
+    "pose": openpose,
    "canny": get_canny_image,
     "depth": get_depth_map,
 }
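These two dicts pair each ControlNet with its preprocessor under the same key. A hypothetical sketch of how generate_image presumably consumes them (names mirror the diff; the actual wiring lives later in app.py):

# Hypothetical sketch: for each ControlNet the user ticks, fetch the model and
# run the matching preprocessor on the conditioning image. `face_image` is a
# stand-in for whatever image app.py actually preprocesses.
selection = ["pose", "canny"]  # e.g. from the Controlnet CheckboxGroup
controlnet_models = [controlnet_map[name] for name in selection]
control_images = [controlnet_map_fn[name](face_image) for name in selection]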
@@ -230,10 +230,10 @@ def run_for_examples(face_file, pose_file, prompt, style, negative_prompt):
         20, # num_steps
         0.8, # identitynet_strength_ratio
         0.8, # adapter_strength_ratio
-
+        0.4, # pose_strength
         0.3, # canny_strength
         0.5, # depth_strength
-        ["
+        ["pose", "canny"], # controlnet_selection
         5.0, # guidance_scale
         42, # seed
         "EulerDiscreteScheduler", # scheduler
@@ -294,7 +294,7 @@ def generate_image(
     num_steps,
     identitynet_strength_ratio,
     adapter_strength_ratio,
-
+    pose_strength,
     canny_strength,
     depth_strength,
     controlnet_selection,
@@ -383,7 +383,7 @@ def generate_image(
 
     if len(controlnet_selection) > 0:
         controlnet_scales = {
-
+            "pose": pose_strength,
            "canny": canny_strength,
             "depth": depth_strength,
         }
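A hedged sketch of how a per-ControlNet scale dict like this is usually turned into the list that diffusers-style multi-ControlNet pipelines expect; the pipe(...) call below follows the generic diffusers ControlNet signature, not necessarily the custom StableDiffusionXLInstantIDPipeline:

# Sketch only: the ordering of scales must match the ordering of the models
# and conditioning images selected above.
control_scales = [controlnet_scales[name] for name in controlnet_selection]
images = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=control_images,                          # one conditioning image per ControlNet
    controlnet_conditioning_scale=control_scales,  # per-ControlNet strength
    num_inference_steps=num_steps,
    guidance_scale=guidance_scale,
).images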
@@ -432,9 +432,7 @@ title = r"""
 
 description = r"""
 <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/InstantID/InstantID' target='_blank'><b>InstantID: Zero-shot Identity-Preserving Generation in Seconds</b></a>.<br>
-
 We are organizing a Spring Festival event with HuggingFace from 2.7 to 2.25, and you can now generate pictures of Spring Festival costumes. Happy Dragon Year 🐲 ! Share the joy with your family.<br>
-
 How to use:<br>
 1. Upload an image with a face. For images with multiple faces, we will only detect the largest face. Ensure the face is not too small and is clearly visible without significant obstructions or blurring.
 2. (Optional) You can upload another image as a reference for the face pose. If you don't, we will use the first detected face image to extract facial landmarks. If you use a cropped face at step 1, it is recommended to upload it to define a new face pose.
@@ -526,16 +524,16 @@ with gr.Blocks(css=css) as demo:
                 )
                 with gr.Accordion("Controlnet"):
                     controlnet_selection = gr.CheckboxGroup(
-                        ["canny", "depth"], label="Controlnet", value=["
+                        ["pose", "canny", "depth"], label="Controlnet", value=["pose"],
                         info="Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation. You can try all three to control the generation process"
                     )
-
-
-
-
-
-
-
+                    pose_strength = gr.Slider(
+                        label="Pose strength",
+                        minimum=0,
+                        maximum=1.5,
+                        step=0.05,
+                        value=0.40,
+                    )
                     canny_strength = gr.Slider(
                         label="Canny strength",
                         minimum=0,
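The new slider follows the same pattern as the existing canny/depth ones, and its 0.40 default matches the 0.4 passed in run_for_examples above. A self-contained toy showing the CheckboxGroup-plus-slider pattern (illustrative names only, not code from app.py):

# Toy Gradio demo of the same control pattern; `summarize` is a stand-in for
# generate_image and exists only for this illustration.
import gradio as gr

def summarize(selection, pose_strength):
    return f"active: {selection}, pose weight: {pose_strength}"

with gr.Blocks() as mini:
    sel = gr.CheckboxGroup(["pose", "canny", "depth"], value=["pose"], label="Controlnet")
    pose = gr.Slider(minimum=0, maximum=1.5, step=0.05, value=0.40, label="Pose strength")
    out = gr.Textbox(label="Summary")
    sel.change(summarize, [sel, pose], out)
    pose.change(summarize, [sel, pose], out)

# mini.launch()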
@@ -619,7 +617,7 @@ with gr.Blocks(css=css) as demo:
             num_steps,
             identitynet_strength_ratio,
             adapter_strength_ratio,
-
+            pose_strength,
             canny_strength,
             depth_strength,
             controlnet_selection,
@@ -650,4 +648,4 @@ with gr.Blocks(css=css) as demo:
 gr.Markdown(article)
 
 demo.queue(api_open=False)
-demo.launch()
+demo.launch()