Spaces:
Running
on
Zero
Running
on
Zero
Upload 4 files
Browse files- app.py +4 -0
- model_dict.json +0 -0
- modutils.py +50 -6
app.py
CHANGED
|
@@ -437,6 +437,7 @@ from modutils import (
|
|
| 437 |
preset_quality,
|
| 438 |
preset_sampler_setting,
|
| 439 |
set_quick_presets,
|
|
|
|
| 440 |
)
|
| 441 |
def description_ui():
|
| 442 |
gr.Markdown(
|
|
@@ -610,6 +611,9 @@ class GuiSD:
|
|
| 610 |
vae_msg = f"VAE: {vae_model}" if vae_model else ""
|
| 611 |
msg_lora = []
|
| 612 |
|
|
|
|
|
|
|
|
|
|
| 613 |
|
| 614 |
if model_name in model_list:
|
| 615 |
model_is_xl = "xl" in model_name.lower()
|
|
|
|
| 437 |
preset_quality,
|
| 438 |
preset_sampler_setting,
|
| 439 |
set_quick_presets,
|
| 440 |
+
insert_model_recom_prompt,
|
| 441 |
)
|
| 442 |
def description_ui():
|
| 443 |
gr.Markdown(
|
|
|
|
| 611 |
vae_msg = f"VAE: {vae_model}" if vae_model else ""
|
| 612 |
msg_lora = []
|
| 613 |
|
| 614 |
+
## BEGIN MOD
|
| 615 |
+
prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
|
| 616 |
+
## END MOD
|
| 617 |
|
| 618 |
if model_name in model_list:
|
| 619 |
model_is_xl = "xl" in model_name.lower()
|
model_dict.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
modutils.py
CHANGED
|
@@ -78,7 +78,7 @@ def download_private_repo(repo_id, dir_path, is_replace):
|
|
| 78 |
if not hf_read_token: return
|
| 79 |
try:
|
| 80 |
snapshot_download(repo_id=repo_id, local_dir=dir_path, allow_patterns=['*.ckpt', '*.pt', '*.pth', '*.safetensors', '*.bin'], use_auth_token=hf_read_token)
|
| 81 |
-
except
|
| 82 |
return
|
| 83 |
else:
|
| 84 |
if is_replace:
|
|
@@ -99,7 +99,7 @@ def get_private_model_list(repo_id, dir_path):
|
|
| 99 |
if not hf_read_token: return []
|
| 100 |
try:
|
| 101 |
files = api.list_repo_files(repo_id, token=hf_read_token)
|
| 102 |
-
except
|
| 103 |
return []
|
| 104 |
else:
|
| 105 |
model_list = []
|
|
@@ -135,7 +135,7 @@ def download_private_file(repo_id, path, is_replace):
|
|
| 135 |
dirname = file.parent.name
|
| 136 |
try:
|
| 137 |
hf_hub_download(repo_id=repo_id, filename=filename, local_dir=dirname, use_auth_token=hf_read_token)
|
| 138 |
-
except
|
| 139 |
return
|
| 140 |
else:
|
| 141 |
if is_replace:
|
|
@@ -154,7 +154,7 @@ def get_model_id_list():
|
|
| 154 |
try:
|
| 155 |
models_vp = api.list_models(author="votepurchase", cardData=True, sort="likes")
|
| 156 |
models_john = api.list_models(author="John6666", cardData=True, sort="last_modified")
|
| 157 |
-
except
|
| 158 |
return model_ids
|
| 159 |
else:
|
| 160 |
for model in models_vp:
|
|
@@ -203,7 +203,7 @@ def get_tupled_model_list(model_list):
|
|
| 203 |
try:
|
| 204 |
if not api.repo_exists(repo_id): continue
|
| 205 |
model = api.model_info(repo_id=repo_id)
|
| 206 |
-
except
|
| 207 |
continue
|
| 208 |
else:
|
| 209 |
if model.private or model.gated: continue
|
|
@@ -859,7 +859,7 @@ def get_model_pipeline(repo_id: str):
|
|
| 859 |
try:
|
| 860 |
if " " in repo_id or not api.repo_exists(repo_id): return default
|
| 861 |
model = api.model_info(repo_id=repo_id)
|
| 862 |
-
except
|
| 863 |
return default
|
| 864 |
else:
|
| 865 |
if model.private or model.gated: return default
|
|
@@ -871,3 +871,47 @@ def get_model_pipeline(repo_id: str):
|
|
| 871 |
return "StableDiffusionPipeline"
|
| 872 |
else:
|
| 873 |
return default
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
if not hf_read_token: return
|
| 79 |
try:
|
| 80 |
snapshot_download(repo_id=repo_id, local_dir=dir_path, allow_patterns=['*.ckpt', '*.pt', '*.pth', '*.safetensors', '*.bin'], use_auth_token=hf_read_token)
|
| 81 |
+
except Exception as e:
|
| 82 |
return
|
| 83 |
else:
|
| 84 |
if is_replace:
|
|
|
|
| 99 |
if not hf_read_token: return []
|
| 100 |
try:
|
| 101 |
files = api.list_repo_files(repo_id, token=hf_read_token)
|
| 102 |
+
except Exception as e:
|
| 103 |
return []
|
| 104 |
else:
|
| 105 |
model_list = []
|
|
|
|
| 135 |
dirname = file.parent.name
|
| 136 |
try:
|
| 137 |
hf_hub_download(repo_id=repo_id, filename=filename, local_dir=dirname, use_auth_token=hf_read_token)
|
| 138 |
+
except Exception as e:
|
| 139 |
return
|
| 140 |
else:
|
| 141 |
if is_replace:
|
|
|
|
| 154 |
try:
|
| 155 |
models_vp = api.list_models(author="votepurchase", cardData=True, sort="likes")
|
| 156 |
models_john = api.list_models(author="John6666", cardData=True, sort="last_modified")
|
| 157 |
+
except Exception as e:
|
| 158 |
return model_ids
|
| 159 |
else:
|
| 160 |
for model in models_vp:
|
|
|
|
| 203 |
try:
|
| 204 |
if not api.repo_exists(repo_id): continue
|
| 205 |
model = api.model_info(repo_id=repo_id)
|
| 206 |
+
except Exception as e:
|
| 207 |
continue
|
| 208 |
else:
|
| 209 |
if model.private or model.gated: continue
|
|
|
|
| 859 |
try:
|
| 860 |
if " " in repo_id or not api.repo_exists(repo_id): return default
|
| 861 |
model = api.model_info(repo_id=repo_id)
|
| 862 |
+
except Exception as e:
|
| 863 |
return default
|
| 864 |
else:
|
| 865 |
if model.private or model.gated: return default
|
|
|
|
| 871 |
return "StableDiffusionPipeline"
|
| 872 |
else:
|
| 873 |
return default
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
def load_model_prompt_dict():
    """Load the model -> recommended prompt/negative-prompt mapping.

    Reads ``model_dict.json`` (UTF-8) from the working directory and returns
    the parsed mapping.  Returns an empty dict when the file is absent or
    malformed — this function runs at import time, and a missing dict should
    merely disable prompt insertion, not crash the app (matching the
    best-effort try/except style used throughout this module).
    """
    import json
    try:
        with open('model_dict.json', encoding='utf-8') as f:
            # NOTE: original bound the result to a local named ``dict``,
            # shadowing the builtin; renamed for clarity.
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        return {}
|
| 882 |
+
|
| 883 |
+
|
| 884 |
+
# Loaded once at import time: maps a model name to its recommended
# "prompt"/"negative_prompt" strings (consumed by insert_model_recom_prompt).
model_prompt_dict = load_model_prompt_dict()
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
    """Merge a model's recommended tags into the user's prompts.

    Strips any tags from the known quality presets (Animagine / Pony / other)
    that are already present, then appends the tags recommended for
    *model_name* in ``model_prompt_dict``, de-duplicated while preserving
    first-occurrence order.  Returns ``(prompt, neg_prompt)`` unchanged when
    the model is unknown or empty.

    Parameters:
        prompt: comma-separated positive prompt.
        neg_prompt: comma-separated negative prompt.
        model_name: key into ``model_prompt_dict`` (module-level mapping).

    Returns:
        Tuple of the updated ``(prompt, neg_prompt)`` strings.
    """
    def to_list(s):
        # BUG FIX: original filtered on ``not s == ""`` (the whole input
        # string), so entries like "a,,b" kept empty elements; filter each
        # stripped element instead.
        return [x.strip() for x in s.split(",") if x.strip()]

    def list_sub(a, b):
        # a minus b, preserving a's order.
        return [e for e in a if e not in b]

    def list_uniq(l):
        # De-duplicate while keeping first-occurrence order.
        return sorted(set(l), key=l.index)

    if not model_name or model_name not in model_prompt_dict.keys(): return prompt, neg_prompt
    animagine_ps = to_list("anime artwork, anime style, key visual, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
    animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
    pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
    pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
    other_ps = to_list("anime artwork, anime style, key visual, vibrant, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
    other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
    # Remove preset tags the user already has so they can be re-added in the
    # model's recommended order without duplication.
    prompts = list_sub(to_list(prompt), animagine_ps + pony_ps + other_ps)
    neg_prompts = list_sub(to_list(neg_prompt), animagine_nps + pony_nps + other_nps)
    # BUG FIX: original tested ``type != "None"`` — the *builtin* ``type``
    # compared to a string, which is always True.  The intent is to append a
    # trailing "" (producing a trailing ", ") only for a real model selection.
    last_empty_p = [""] if not prompts and model_name != "None" else []
    last_empty_np = [""] if not neg_prompts and model_name != "None" else []
    ps = to_list(model_prompt_dict[model_name]["prompt"])
    nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
    prompts = prompts + ps
    neg_prompts = neg_prompts + nps
    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
    return prompt, neg_prompt
|
| 917 |
+
|