refactor
- app.py +109 -146
- models/controlnet.py +51 -0
- models/embeds.py +5 -0
- utils/model_utils.py +5 -1
- utils/string_utils.py +45 -0
app.py
CHANGED
@@ -1,4 +1,4 @@
-import spaces
+import spaces  # must be imported when using Spaces
 import os
 from stablepy import Model_Diffusers
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
@@ -31,63 +31,13 @@ from models.checkpoints import CHECKPOINT_LIST as download_model
 from models.loras import LORA_LIST as download_lora
 from models.format_models import FORMAT_MODELS as load_diffusers_format_model
 from models.upscaler import upscaler_dict_gui
+from models.controlnet import preprocessor_controlnet
+from models.embeds import download_embeds
 from examples.examples import example_prompts
 from utils.download_utils import download_things
 from utils.model_utils import get_model_list
 
-preprocessor_controlnet = {
-    "openpose": [
-        "Openpose",
-        "None",
-    ],
-    "scribble": [
-        "HED",
-        "Pidinet",
-        "None",
-    ],
-    "softedge": [
-        "Pidinet",
-        "HED",
-        "HED safe",
-        "Pidinet safe",
-        "None",
-    ],
-    "segmentation": [
-        "UPerNet",
-        "None",
-    ],
-    "depth": [
-        "DPT",
-        "Midas",
-        "None",
-    ],
-    "normalbae": [
-        "NormalBae",
-        "None",
-    ],
-    "lineart": [
-        "Lineart",
-        "Lineart coarse",
-        "Lineart (anime)",
-        "None",
-        "None (anime)",
-    ],
-    "shuffle": [
-        "ContentShuffle",
-        "None",
-    ],
-    "canny": [
-        "Canny"
-    ],
-    "mlsd": [
-        "MLSD"
-    ],
-    "ip2p": [
-        "ip2p"
-    ]
-}
-
-task_stablepy = {
+task_stablepy: dict = {
     'txt2img': 'txt2img',
     'img2img': 'img2img',
     'inpaint': 'inpaint',
@@ -111,20 +61,34 @@ task_stablepy = {
     'optical pattern ControlNet': 'pattern',
     'tile realistic': 'sdxl_tile_realistic',
 }
-
-task_model_list = list(task_stablepy.keys())
-
-directory_models = 'models'
-os.makedirs(directory_models, exist_ok=True)
-directory_loras = 'loras'
-os.makedirs(directory_loras, exist_ok=True)
-directory_vaes = 'vaes'
-os.makedirs(directory_vaes, exist_ok=True)
-
 # LOAD ALL ENV TOKEN
 CIVITAI_API_KEY: str = os.environ.get("CIVITAI_API_KEY")
 hf_token: str = os.environ.get("HF_TOKEN")
 
+
+task_model_list = list(task_stablepy.keys())
+
+directory_models: str = 'models'
+os.makedirs(
+    directory_models,
+    exist_ok=True
+)
+directory_loras: str = 'loras'
+os.makedirs(
+    directory_loras,
+    exist_ok=True
+)
+directory_vaes: str = 'vaes'
+os.makedirs(
+    directory_vaes,
+    exist_ok=True
+)
+directory_embeds: str = 'embedings'
+os.makedirs(
+    directory_embeds,
+    exist_ok=True
+)
+
 # Download stuffs
 for url in [url.strip() for url in download_model.split(',')]:
     if not os.path.exists(f"./models/{url.split('/')[-1]}"):
@@ -153,18 +117,6 @@ for url in [url.strip() for url in download_lora.split(',')]:
             CIVITAI_API_KEY
         )
 
-# Download Embeddings
-directory_embeds = 'embedings'
-os.makedirs(
-    directory_embeds,
-    exist_ok=True
-)
-download_embeds = [
-    'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
-    'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
-    'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
-]
-
 for url_embed in download_embeds:
     if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
         download_things(
@@ -185,11 +137,11 @@ vae_model_list.insert(0, "None")
 
 
 def get_my_lora(link_url):
-    for
-        if not os.path.exists(f"./loras/{
+    for __url in [_url.strip() for _url in link_url.split(',')]:
+        if not os.path.exists(f"./loras/{__url.split('/')[-1]}"):
             download_things(
                 directory_loras,
-
+                __url,
                 hf_token,
                 CIVITAI_API_KEY
             )
@@ -212,47 +164,6 @@ def get_my_lora(link_url):
 
 print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
 
-
-def extract_parameters(input_string):
-    parameters = {}
-    input_string = input_string.replace("\n", "")
-
-    if not "Negative prompt:" in input_string:
-        print("Negative prompt not detected")
-        parameters["prompt"] = input_string
-        return parameters
-
-    parm = input_string.split("Negative prompt:")
-    parameters["prompt"] = parm[0]
-    if not "Steps:" in parm[1]:
-        print("Steps not detected")
-        parameters["neg_prompt"] = parm[1]
-        return parameters
-    parm = parm[1].split("Steps:")
-    parameters["neg_prompt"] = parm[0]
-    input_string = "Steps:" + parm[1]
-
-    # Extracting Steps
-    steps_match = re.search(r'Steps: (\d+)', input_string)
-    if steps_match:
-        parameters['Steps'] = int(steps_match.group(1))
-
-    # Extracting Size
-    size_match = re.search(r'Size: (\d+x\d+)', input_string)
-    if size_match:
-        parameters['Size'] = size_match.group(1)
-        width, height = map(int, parameters['Size'].split('x'))
-        parameters['width'] = width
-        parameters['height'] = height
-
-    # Extracting other parameters
-    other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
-    for param in other_parameters:
-        parameters[param[0]] = param[1].strip('"')
-
-    return parameters
-
-
 #######################
 # GUI
 #######################
@@ -263,6 +174,8 @@ import IPython.display
 import time, json
 from IPython.utils import capture
 import logging
+from utils.string_utils import extract_parameters
+from stablepy import logger
 
 logging.getLogger("diffusers").setLevel(logging.ERROR)
 import diffusers
@@ -285,7 +198,6 @@ warnings.filterwarnings(
     category=FutureWarning,
     module="transformers"
 )
-from stablepy import logger
 
 logger.setLevel(logging.DEBUG)
 
@@ -448,7 +360,7 @@ class GuiSD:
         vae_model = vae_model if vae_model != "None" else None
         loras_list = [lora1, lora2, lora3, lora4, lora5]
         vae_msg = f"VAE: {vae_model}" if vae_model else ""
-        msg_lora = []
+        msg_lora: list = []
 
         if model_name in model_list:
            model_is_xl = "xl" in model_name.lower()
@@ -471,13 +383,15 @@
            vae_model = None
 
        for la in loras_list:
-            if la is
-
-
-
-
-
-
+            if la is None or la == "None" or la not in lora_model_list:
+                continue
+
+            print(la)
+            lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
+            if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
+                msg_inc_lora = f"The LoRA {la} is for {'SD 1.5' if model_is_xl else 'SDXL'}, but you are using {model_type}."
+                gr.Info(msg_inc_lora)
+                msg_lora.append(msg_inc_lora)
 
        task = task_stablepy[task]
 
@@ -718,6 +632,7 @@ def update_task_options(model_name, task_name):
     )
 
 
+# APP
 with gr.Blocks(css=CSS) as app:
     gr.Markdown("# 🧩 (Ivan) DiffuseCraft")
     with gr.Tab("Generation"):
@@ -846,7 +761,7 @@ with gr.Blocks(css=CSS) as app:
            }
            valid_keys = list(valid_receptors.keys())
 
-            parameters = extract_parameters(base_prompt)
+            parameters: dict = extract_parameters(base_prompt)
            for key, val in parameters.items():
                # print(val)
                if key in valid_keys:
@@ -930,7 +845,7 @@ with gr.Blocks(css=CSS) as app:
                vae_model_gui = gr.Dropdown(
                    label="VAE Model",
                    choices=vae_model_list,
-                    value=vae_model_list[
+                    value=vae_model_list[1]
                )
 
            with gr.Accordion("Hires fix", open=False, visible=True):
@@ -1061,11 +976,16 @@ with gr.Blocks(css=CSS) as app:
                button_lora.click(
                    get_my_lora,
                    [text_lora],
-                    [
+                    [
+                        lora1_gui,
+                        lora2_gui,
+                        lora3_gui,
+                        lora4_gui,
+                        lora5_gui
+                    ]
                )
 
-                with gr.Accordion("IP-Adapter", open=False, visible=True):
-
+                with gr.Accordion("IP-Adapter", open=False, visible=True):  # IP-Adapter
                    IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
                    MODE_IP_OPTIONS = [
                        "original",
@@ -1075,17 +995,58 @@ with gr.Blocks(css=CSS) as app:
                    ]
 
                    with gr.Accordion("IP-Adapter 1", open=False, visible=True):
-                        image_ip1 = gr.Image(
-
-
-
-
+                        image_ip1 = gr.Image(
+                            label="IP Image",
+                            type="filepath"
+                        )
+                        mask_ip1 = gr.Image(
+                            label="IP Mask",
+                            type="filepath"
+                        )
+                        model_ip1 = gr.Dropdown(
+                            value="plus_face",
+                            label="Model",
+                            choices=IP_MODELS
+                        )
+                        mode_ip1 = gr.Dropdown(
+                            value="original",
+                            label="Mode",
+                            choices=MODE_IP_OPTIONS
+                        )
+                        scale_ip1 = gr.Slider(
+                            minimum=0.,
+                            maximum=2.,
+                            step=0.01,
+                            value=0.7,
+                            label="Scale"
+                        )
+
                    with gr.Accordion("IP-Adapter 2", open=False, visible=True):
-                        image_ip2 = gr.Image(
-
-
-
-
+                        image_ip2 = gr.Image(
+                            label="IP Image",
+                            type="filepath"
+                        )
+                        mask_ip2 = gr.Image(
+                            label="IP Mask (optional)",
+                            type="filepath"
+                        )
+                        model_ip2 = gr.Dropdown(
+                            value="base",
+                            label="Model",
+                            choices=IP_MODELS
+                        )
+                        mode_ip2 = gr.Dropdown(
+                            value="style",
+                            label="Mode",
+                            choices=MODE_IP_OPTIONS
+                        )
+                        scale_ip2 = gr.Slider(
+                            minimum=0.,
+                            maximum=2.,
+                            step=0.01,
+                            value=0.7,
+                            label="Scale"
+                        )
 
                with gr.Accordion("ControlNet / Img2img / Inpaint", open=False, visible=True):
                    image_control = gr.Image(
@@ -1122,7 +1083,10 @@ with gr.Blocks(css=CSS) as app:
                choices_task = preprocessor_controlnet[task]
            else:
                choices_task = preprocessor_controlnet["canny"]
-            return gr.update(
+            return gr.update(
+                choices=choices_task,
+                value=choices_task[0]
+            )
 
 
        task_gui.change(
@@ -1689,7 +1653,6 @@ with gr.Blocks(css=CSS) as app:
 )
 
 app.queue()
-
 app.launch(
    show_error=True,
    debug=True,
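
The LoRA compatibility warning added to app.py above can be read on its own. The sketch below restates that check as a standalone helper for illustration only: the helper name and the example call are not part of the commit, and the name heuristic is the one the diff uses.

def incompatible_lora_messages(loras_list, lora_model_list, model_is_xl, model_type):
    """Collect warnings for LoRAs that do not match the loaded base model (illustrative helper)."""
    messages = []
    for la in loras_list:
        if la is None or la == "None" or la not in lora_model_list:
            continue
        # Name heuristic from the commit: these patterns are treated as SD 1.5 LoRAs.
        lora_is_sd15 = "animetarot" in la.lower() or "hyper-sd15-8steps" in la.lower()
        if (model_is_xl and lora_is_sd15) or (not model_is_xl and not lora_is_sd15):
            messages.append(
                f"The LoRA {la} is for {'SD 1.5' if model_is_xl else 'SDXL'}, "
                f"but you are using {model_type}."
            )
    return messages

# Example: an SD 1.5 LoRA selected while an SDXL checkpoint is active.
print(incompatible_lora_messages(
    loras_list=["animetarot_v1.safetensors", "None"],
    lora_model_list=["animetarot_v1.safetensors"],
    model_is_xl=True,
    model_type="SDXL 1.0",
))
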
models/controlnet.py
ADDED
@@ -0,0 +1,51 @@
+preprocessor_controlnet: dict = {
+    "openpose": [
+        "Openpose",
+        "None",
+    ],
+    "scribble": [
+        "HED",
+        "Pidinet",
+        "None",
+    ],
+    "softedge": [
+        "Pidinet",
+        "HED",
+        "HED safe",
+        "Pidinet safe",
+        "None",
+    ],
+    "segmentation": [
+        "UPerNet",
+        "None",
+    ],
+    "depth": [
+        "DPT",
+        "Midas",
+        "None",
+    ],
+    "normalbae": [
+        "NormalBae",
+        "None",
+    ],
+    "lineart": [
+        "Lineart",
+        "Lineart coarse",
+        "Lineart (anime)",
+        "None",
+        "None (anime)",
+    ],
+    "shuffle": [
+        "ContentShuffle",
+        "None",
+    ],
+    "canny": [
+        "Canny"
+    ],
+    "mlsd": [
+        "MLSD"
+    ],
+    "ip2p": [
+        "ip2p"
+    ]
+}
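
A minimal sketch of how app.py consumes this table when the ControlNet task changes. The callback name here is hypothetical; the lookup with a "canny" fallback and the gr.update call mirror the app.py hunk above.

import gradio as gr

from models.controlnet import preprocessor_controlnet

def preprocessor_choices_for(task: str):
    # Pick the preprocessor list for the selected ControlNet task,
    # falling back to the "canny" list for tasks without an entry.
    choices_task = preprocessor_controlnet.get(task, preprocessor_controlnet["canny"])
    # Refresh the preprocessor dropdown with the new choices and a default value.
    return gr.update(choices=choices_task, value=choices_task[0])
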
models/embeds.py
ADDED
@@ -0,0 +1,5 @@
+download_embeds: list = [
+    'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
+    'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
+    'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
+]
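
For reference, app.py derives the local file name from the last URL segment before downloading, so the list above maps to the paths shown in this small illustrative loop (the 'embedings' spelling matches the directory name the app uses):

from models.embeds import download_embeds

# Print the target path that app.py checks before calling download_things.
for url_embed in download_embeds:
    print(f"./embedings/{url_embed.split('/')[-1]}")
# -> ./embedings/bad_prompt_version2.pt
# -> ./embedings/EasyNegativeV2.safetensors
# -> ./embedings/bad-hands-5.pt
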
utils/model_utils.py
CHANGED
@@ -1,4 +1,8 @@
 def get_model_list(directory_path):
+    """
+    :param directory_path: directory to scan for model files
+    :return: list of paths to the model files found
+    """
     import os
     model_list: list = []
     valid_extensions = {
@@ -16,4 +20,4 @@ def get_model_list(directory_path):
            # model_list.append((name_without_extension, file_path))
            model_list.append(file_path)
            print('\033[34mFILE: ' + file_path + '\033[0m')
-    return model_list
+    return model_list
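
A hypothetical startup snippet showing how this helper is used by app.py; the directory names and the "None" insertion come from the diff, while the exact wiring in app.py is not shown here.

from utils.model_utils import get_model_list

model_list = get_model_list('models')     # checkpoints downloaded into ./models
vae_model_list = get_model_list('vaes')   # VAE files downloaded into ./vaes
vae_model_list.insert(0, "None")          # app.py prepends a "None" choice for the VAE dropdown
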
utils/string_utils.py
CHANGED
@@ -12,3 +12,48 @@ def process_string(input_string: str):
         return result
     else:
         return None
+
+
+def extract_parameters(input_string: str) -> dict:
+    """
+    :param input_string: generation metadata string in the A1111 format
+    :return: dict with prompt, neg_prompt and the parsed generation settings
+    """
+    import re
+    parameters: dict = {}
+    input_string: str = input_string.replace("\n", "")
+
+    if not "Negative prompt:" in input_string:
+        print("Negative prompt not detected")
+        parameters["prompt"] = input_string
+        return parameters
+
+    parm: list = input_string.split("Negative prompt:")
+    parameters["prompt"] = parm[0]
+    if not "Steps:" in parm[1]:
+        print("Steps not detected")
+        parameters["neg_prompt"] = parm[1]
+        return parameters
+    parm = parm[1].split("Steps:")
+    parameters["neg_prompt"] = parm[0]
+    input_string = "Steps:" + parm[1]
+
+    # Extracting Steps
+    steps_match = re.search(r'Steps: (\d+)', input_string)
+    if steps_match:
+        parameters['Steps'] = int(steps_match.group(1))
+
+    # Extracting Size
+    size_match = re.search(r'Size: (\d+x\d+)', input_string)
+    if size_match:
+        parameters['Size'] = size_match.group(1)
+        width, height = map(int, parameters['Size'].split('x'))
+        parameters['width'] = width
+        parameters['height'] = height
+
+    # Extracting other parameters
+    other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
+    for param in other_parameters:
+        parameters[param[0]] = param[1].strip('"')
+
+    return parameters
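
A worked example of the relocated parser on a typical A1111-style metadata string; the input is made up, and the commented results follow from the code above.

from utils.string_utils import extract_parameters

raw = (
    "1girl, masterpiece, best quality\n"
    "Negative prompt: lowres, bad anatomy\n"
    "Steps: 30, Sampler: DPM++ 2M, CFG scale: 7, Seed: 42, Size: 512x768"
)

params = extract_parameters(raw)
print(params["prompt"])                   # '1girl, masterpiece, best quality'
print(params["neg_prompt"])               # ' lowres, bad anatomy' (leading space is kept)
print(params["width"], params["height"])  # 512 768
# Note: the generic key/value pass captures 'CFG scale' under the key 'scale'
# (the regex only matches word characters) and overwrites 'Steps' with the string '30'.
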