Update app.py
app.py CHANGED
@@ -17,7 +17,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-def get_instantID(portrait_in, condition_pose, prompt, style):
+def get_instantID(portrait_in, condition_pose, controlnet, prompt, style):
 
     negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green"
 
@@ -37,7 +37,7 @@ def get_instantID(portrait_in, condition_pose, prompt, style):
         0.4, # float (numeric value between 0 and 1.5) in 'Pose strength' Slider component
         0.4, # float (numeric value between 0 and 1.5) in 'Canny strength' Slider component
         0.4, # float (numeric value between 0 and 1.5) in 'Depth strength' Slider component
-
+        controlnet, # List[Literal['pose', 'canny', 'depth']] in 'Controlnet' Checkboxgroup component
         1.5, # float (numeric value between 0.1 and 20.0) in 'Guidance scale' Slider component
         seed, # float (numeric value between 0 and 2147483647) in 'Seed' Slider component
         "EulerDiscreteScheduler", # Literal['DEISMultistepScheduler', 'HeunDiscreteScheduler', 'EulerDiscreteScheduler', 'DPMSolverMultistepScheduler', 'DPMSolverMultistepScheduler-Karras', 'DPMSolverMultistepScheduler-Karras-SDE'] in 'Schedulers' Dropdown component
@@ -105,7 +105,7 @@ def get_short_caption(image_in):
     print(result)
     return result
 
-def infer(image_in, camera_shot, conditional_pose, prompt, style, chosen_model):
+def infer(image_in, camera_shot, conditional_pose, controlnet_selection, prompt, style, chosen_model):
 
     if camera_shot == "custom":
         if conditional_pose != None:
@@ -114,7 +114,7 @@ def infer(image_in, camera_shot, conditional_pose, prompt, style, chosen_model):
             raise gr.Error("No custom conditional shot found !")
 
 
-    iid_img = get_instantID(image_in, conditional_pose, prompt, style)
+    iid_img = get_instantID(image_in, conditional_pose, controlnet_selection, prompt, style)
 
     #short_cap = get_short_caption(iid_img)
 
@@ -150,18 +150,23 @@ with gr.Blocks(css=css) as demo:
         with gr.Column():
            face_in = gr.Image(type="filepath", label="Face to copy", value="monalisa.png")
        with gr.Column():
-           with gr.
-           [lines 154-160 of the old file not shown in the diff view]
+           with gr.Group():
+               with gr.Row():
+                   camera_shot = gr.Dropdown(
+                       label = "Camera Shot",
+                       info = "Use standard camera shots vocabulary, or drop your custom shot as conditional pose (1280*720 ratio is recommended)",
+                       choices = [
+                           "custom", "close-up", "medium close-up", "medium shot", "cowboy shot", "medium full shot", "full shot"
+                       ],
+                       value = "custom"
+                   )
+                   style = gr.Dropdown(label="Style template", info="InstantID legacy templates", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
+
+           condition_shot = gr.Image(type="filepath", label="Custom conditional shot (Important) [1280*720 recommended]")
+           controlnet_selection = gr.CheckboxGroup(
+               ["pose", "canny", "depth"], label="Controlnet", value=["pose"],
+               info="Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation. You can try all three to control the generation process"
            )
-           style = gr.Dropdown(label="Style template", info="InstantID legacy templates", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
-
-           condition_shot = gr.Image(type="filepath", label="Custom conditional shot (Important) [1280*720 recommended]")
           prompt = gr.Textbox(label="Short Prompt (keeping it short is better)")
           chosen_model = gr.Radio(label="Choose a model", choices=["i2vgen-xl", "stable-video"], value="i2vgen-xl", interactive=False, visible=False)
 
@@ -188,6 +193,7 @@ with gr.Blocks(css=css) as demo:
            face_in,
            camera_shot,
            condition_shot,
+           controlnet_selection,
            prompt,
            style,
            chosen_model
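
For readers unfamiliar with how such a change propagates through a Gradio app, the sketch below mirrors the pattern this commit follows: a gr.CheckboxGroup returns the list of checked labels, that list is appended to the inputs of the click handler at the position matching the new parameter of infer, and the handler forwards it to the generation call. This is a simplified stand-in, not the Space's actual code: fake_get_instantID, the trimmed-down component set, and the placeholder style string are assumptions made for illustration only.

# Minimal, self-contained sketch of the wiring pattern used in this commit.
# `fake_get_instantID` is a hypothetical stand-in for the real InstantID call;
# only the data flow of the CheckboxGroup selection is the point here.
import gradio as gr

def fake_get_instantID(portrait_in, condition_pose, controlnet, prompt, style):
    # The real app forwards `controlnet` (e.g. ["pose", "canny"]) as one more
    # positional argument of its endpoint call; here we just echo it back.
    return f"controlnets: {', '.join(controlnet) or 'none'} | prompt: {prompt} | style: {style}"

def infer(image_in, conditional_pose, controlnet_selection, prompt):
    # `controlnet_selection` arrives as a list of checked labels, in the same
    # positional slot it occupies in the `inputs=[...]` list below.
    return fake_get_instantID(image_in, conditional_pose, controlnet_selection, prompt, style="placeholder-style")

with gr.Blocks() as demo:
    face_in = gr.Image(type="filepath", label="Face to copy")
    condition_shot = gr.Image(type="filepath", label="Custom conditional shot")
    controlnet_selection = gr.CheckboxGroup(
        ["pose", "canny", "depth"], label="Controlnet", value=["pose"]
    )
    prompt = gr.Textbox(label="Short Prompt")
    result = gr.Textbox(label="Result")
    submit_btn = gr.Button("Submit")
    # The order of `inputs` must match the parameter order of `infer`, which is
    # why the diff inserts `controlnet_selection` between `condition_shot` and `prompt`.
    submit_btn.click(
        fn=infer,
        inputs=[face_in, condition_shot, controlnet_selection, prompt],
        outputs=[result],
    )

demo.launch()

Checking or unchecking boxes only changes the list that travels through infer; which ControlNets are actually applied is decided by the endpoint that receives it.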