Spaces:
Running
Running
Upload 8 files
Browse files- .gitattributes +2 -0
- README.md +12 -12
- app.py +29 -0
- ctag.py +147 -0
- data/animagine.json +0 -0
- data/illustrious.json +0 -0
- data/noob_danbooru.json +3 -0
- data/noob_e621.json +3 -0
- requirements.txt +2 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
data/noob_danbooru.json filter=lfs diff=lfs merge=lfs -text
|
37 |
+
data/noob_e621.json filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
@@ -1,12 +1,12 @@
|
|
1 |
-
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 5.12.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
+
---
|
2 |
+
title: Animagine / Illustrious / NoobAI XL Characters Tag Search
|
3 |
+
emoji: 🐻🏷️🔍
|
4 |
+
colorFrom: indigo
|
5 |
+
colorTo: pink
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 5.12.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
---
|
11 |
+
|
12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
from ctag import MODELS, DEFAULT_DF, search_char_dict, on_select_df

# Page-level CSS: centered title/info text; images whose URL ends in "#center"
# are centered as a block.
CSS = """
.title { font-size: 3em; align-items: center; text-align: center; }
.info { align-items: center; text-align: center; }
img[src*="#center"] { display: block; margin: auto; }
"""

with gr.Blocks(fill_width=True, css=CSS) as app:
    gr.Markdown("## π Text Search for Animagine / Illustrious / NoobAI XL tag characters", elem_classes="title")
    with gr.Column():
        with gr.Group():
            with gr.Row(equal_height=True):
                with gr.Column(scale=2):
                    # Free-text query, matched against the tag table in ctag.py.
                    search_input = gr.Textbox(label="Search for characters or series:", placeholder="sousou no frieren")
                    # When checked, selecting a row fetches extra detail from the web.
                    search_detail = gr.Checkbox(label="Show character detail", value=True)
                # Which model tag dictionaries to include in the search.
                search_model = gr.CheckboxGroup(label="Models", choices=MODELS, value=MODELS, scale=1)
        with gr.Group():
            with gr.Row(equal_height=True):
                # Read-only selected tag with a copy button, plus a markdown detail panel.
                search_tag = gr.Textbox(label="Output tag", value="", show_copy_button=True, interactive=False)
                search_md = gr.Markdown("<br><br><br>", elem_classes="info")
        search_output = gr.Dataframe(label="Select character", value=DEFAULT_DF, type="pandas", wrap=True, interactive=False)

    # Re-run the search whenever the query text or the model selection changes;
    # "always_last" drops intermediate events while typing.
    gr.on(triggers=[search_input.change, search_model.change], fn=search_char_dict,
          inputs=[search_input, search_model], outputs=[search_output], trigger_mode="always_last")
    # Clicking a result row resolves it to (tag string, markdown detail).
    search_output.select(on_select_df, [search_output, search_detail], [search_tag, search_md])

app.launch(ssr_mode=False)
|
ctag.py
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import pandas as pd
|
3 |
+
import json
|
4 |
+
import requests
|
5 |
+
from requests.adapters import HTTPAdapter
|
6 |
+
from urllib3.util import Retry
|
7 |
+
from pykakasi import kakasi
|
8 |
+
|
9 |
+
|
10 |
+
# Model names shown in the UI; must match the keys produced by load_char_dict().
MODELS = ["Animagine 3.1", "NoobAI XL", "Illustrious"]
# Placeholder dataframe shown before the first search.
DEFAULT_DF = pd.DataFrame({"Character": ["oomuro sakurako (yuru yuri) (NoobAI XL)", "kafuu chino (gochuumon wa usagi desu ka?) (Animagine 3.1)"]})


# Shared kana/kanji -> romaji converter (pykakasi), reused by to_roman().
kks = kakasi()
|
15 |
+
|
16 |
+
|
17 |
+
def to_roman(s: str):
    """Romanize *s* via pykakasi (Hepburn); on any failure return *s* unchanged."""
    try:
        pieces = []
        for token in kks.convert(s):
            pieces.append(token.get("hepburn", ""))
        return "".join(pieces)
    except Exception as e:
        print(e)
        return s
|
23 |
+
|
24 |
+
|
25 |
+
def get_user_agent():
    """Browser-like User-Agent string sent with every outbound HTTP request."""
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
    return ua
|
27 |
+
|
28 |
+
|
29 |
+
def get_series_wiki_dict():
    """Fetch the anime_id -> Wikipedia-URL map from animecharactersdatabase.com.

    Returns:
        dict[str, str]: anime_id (stringified) to wikipedia_url. Empty dict on
        any HTTP or parsing failure, so callers can treat wiki links as
        best-effort.
    """
    headers = {'User-Agent': get_user_agent(), 'content-type': 'application/json'}
    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php?title_wiki_links'
    # Retry transient server errors with exponential backoff.
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    try:
        # Bug fix: the original session was never closed, leaking its
        # connection pool; the context manager closes it deterministically.
        with requests.Session() as session:
            session.mount("https://", HTTPAdapter(max_retries=retries))
            r = session.get(base_url, params={}, headers=headers, timeout=(5.0, 15))
            if not r.ok: return {}
            j = dict(r.json())
            if "titles" not in j: return {}
            return {str(i["anime_id"]): i["wikipedia_url"] for i in j["titles"]}
    except Exception as e:
        print(e)
        return {}
|
46 |
+
|
47 |
+
|
48 |
+
# Fetched once at import time; maps anime_id (str) -> Wikipedia URL
# ({} if the API call failed).
series_wiki_dict = get_series_wiki_dict()
|
49 |
+
|
50 |
+
|
51 |
+
def find_char_info(query: str):
    """Look up one character on animecharactersdatabase.com.

    Tries *query* as-is, then with its words reversed (surname-first vs
    given-name-first ordering).

    Returns:
        dict with name/series/gender/image/desc and, when known, a "wiki"
        link; None when nothing matches or the request fails.
    """
    headers = {'User-Agent': get_user_agent(), 'content-type': 'application/json'}
    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php'
    params = {"character_q": query}
    # Fallback query: same words in reverse order.
    params2 = {"character_q": " ".join(reversed(query.split(" ")))}
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    try:
        # Bug fix: close the session (connection pool) when done instead of
        # leaking it on every lookup.
        with requests.Session() as session:
            session.mount("https://", HTTPAdapter(max_retries=retries))
            r = session.get(base_url, params=params, headers=headers, timeout=(5.0, 15))
            # The API returns the literal -1 for "no results".
            if not r.ok or r.json() == -1:
                r = session.get(base_url, params=params2, headers=headers, timeout=(5.0, 15))
                if not r.ok or r.json() == -1: return None
            j = dict(r.json())
            if "search_results" not in j or len(j["search_results"]) == 0: return None
            i = j["search_results"][0]  # take the best (first) match only
            d = {"name": i["name"], "series": i["anime_name"], "gender": i["gender"],
                 "image": i["character_image"], "desc": i["desc"]}
            aid = str(i["anime_id"])
            if aid in series_wiki_dict:  # wiki link is optional / best-effort
                d["wiki"] = series_wiki_dict[aid]
            return d
    except Exception as e:
        print(e)
        return None
|
80 |
+
|
81 |
+
|
82 |
+
def load_json(filename: str):
    """Read *filename* as UTF-8 JSON and return it as a dict ({} on any error)."""
    try:
        with open(filename, encoding="utf-8") as fp:
            return dict(json.load(fp))
    except Exception as e:
        print(e)
        return {}
|
90 |
+
|
91 |
+
|
92 |
+
def load_char_dict():
    """Load each model's tag dictionary from data/.

    NoobAI XL merges two sources; e621 entries win on duplicate keys.
    """
    return {
        "Animagine 3.1": load_json("data/animagine.json"),
        "Illustrious": load_json("data/illustrious.json"),
        "NoobAI XL": load_json("data/noob_danbooru.json") | load_json("data/noob_e621.json"),
    }
|
98 |
+
|
99 |
+
|
100 |
+
def create_char_df(char_dict: dict):
    """Flatten {model: {key: char_record}} into display tags.

    Returns:
        (df, d, m) where
        df - one-column DataFrame of "name (series) (model)" tags,
        d  - tag -> copy of the character record,
        m  - tag -> model name the tag belongs to.
    """
    d = {}
    m = {}
    tags = []
    for model, value in char_dict.items():
        # The inner dict keys were unused; iterate values() directly.
        for v in value.values():
            tag = f'{v["name"]} ({v["series"]}) ({model})' if v["series"] else f'{v["name"]} ({model})'
            tags.append(tag)
            d[tag] = v.copy()  # copy so later mutation of char_dict doesn't leak in
            m[tag] = model
    df = pd.DataFrame({"Character": tags})
    return df, d, m
|
112 |
+
|
113 |
+
|
114 |
+
# Built once at import time: the full tag table plus the lookup dicts used by
# the UI callbacks (search_char_dict / on_select_df).
char_dict = load_char_dict()
char_df, tag_dict, model_dict = create_char_df(char_dict)
|
116 |
+
|
117 |
+
|
118 |
+
def search_char_dict(q: str, models: list[str], progress=gr.Progress(track_tqdm=True)):
    """Return tags whose text contains the romanized *q*, limited to *models*.

    Empty/whitespace queries (and unexpected errors) return DEFAULT_DF so the
    Dataframe component always receives a valid value.
    """
    try:
        MAX_ROWS = 50  # cap on the number of result rows shown in the UI
        if not q.strip():
            return DEFAULT_DF
        tags = char_df["Character"].tolist()
        rq = to_roman(q)  # allow kana/kanji queries against romanized tags
        # Every tag ends with " (<model>)"; filter by the selected models.
        suffixes = tuple(f" ({s})" for s in models)
        search_results = pd.DataFrame({"Character": [s for s in tags if rq in s and s.endswith(suffixes)]})
        # Bug fix: the original compared/sliced *columns* (a no-op on this
        # one-column frame); truncate rows instead.
        if len(search_results) > MAX_ROWS:
            search_results = search_results.iloc[:MAX_ROWS]
        return search_results
    except Exception as e:
        print(e)
        # Bug fix: the original fell through and returned None, which is not a
        # valid Dataframe value.
        return DEFAULT_DF
|
134 |
+
|
135 |
+
|
136 |
+
def on_select_df(df: pd.DataFrame, is_detail: bool, evt: gr.SelectData):
    """Resolve the clicked dataframe cell to (tag string, markdown detail).

    Always returns two values to match the two wired output components
    (search_tag, search_md); unknown selections yield empty strings.
    """
    d = tag_dict.get(evt.value, None)
    # Bug fix: the original returned a single "" here, but the .select() event
    # expects two outputs.
    if d is None: return "", ""
    info = find_char_info(d["name"]) if is_detail else None
    if info is not None:
        # Bug fix: "wiki" is optional in find_char_info()'s result; .get avoids
        # a KeyError when the series has no known Wikipedia link.
        wiki = info.get("wiki", "")
        md = f'## [{info["name"]}]({wiki}) / [{info["series"]}]({wiki}) / {info["gender"]}\n![{info["name"]}]({info["image"]}#center)\n[{info["desc"]}]({wiki})'
    else:
        md = f'## {d["name"]} / {d["series"]}' if d["series"] else f'## {d["name"]}'
    md += f'\n<br>Tag is for {model_dict[evt.value]}.'
    # NOTE(review): assumes every character record carries a "tag" key — the
    # visible data files are LFS pointers, so confirm against the JSON schema.
    return d["tag"], md
|
data/animagine.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/illustrious.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/noob_danbooru.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8f8d69d45ba7442f74edcc88aec042d5021116c12bea193bd8edd4bbc9f35016
|
3 |
+
size 43052594
|
data/noob_e621.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8597a1068152368c4f5f59f4fab46669175e7685a7d2edce85239c36c227101c
|
3 |
+
size 38623302
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
pandas
|
2 |
+
pykakasi
|