Upload 2 files
- app.py +10 -6
- multit2i.py +31 -15
app.py
CHANGED
@@ -47,7 +47,7 @@ css = """
 with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
     with gr.Column():
         with gr.Accordion("Advanced settings", open=False):
-            with gr.Accordion("Recommended Prompt", open=
+            with gr.Accordion("Recommended Prompt", open=True):
                 recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
                 with gr.Row():
                     positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
@@ -63,9 +63,9 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
                 v2_tag_type = gr.Radio(label="Tag Type", info="danbooru for common, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru", visible=False)
                 v2_model = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
                 v2_copy = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
-
-
-
+        with gr.Accordion("Model", open=True):
+            model_name = gr.Dropdown(label="Select Model", show_label=False, choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
+            model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_id="model_info")
         with gr.Group():
             with gr.Accordion("Prompt from Image File", open=False):
                 tagger_image = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"], height=256)
@@ -118,9 +118,11 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
                 positive_prefix, positive_suffix, negative_prefix, negative_suffix],
        outputs=[results],
        queue=True,
+       trigger_mode="multiple",
+       concurrency_limit=5,
        show_progress="full",
        show_api=True,
-    ).
+    ).then(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
     gr.on(
         triggers=[random_button.click],
         fn=infer_multi_random,
@@ -128,9 +130,11 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
                 positive_prefix, positive_suffix, negative_prefix, negative_suffix],
        outputs=[results],
        queue=True,
+       trigger_mode="multiple",
+       concurrency_limit=5,
        show_progress="full",
        show_api=True,
-    ).
+    ).then(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
     clear_prompt.click(lambda: (None, None, None), None, [prompt, v2_series, v2_character], queue=False, show_api=False)
     clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
     recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
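The functional change in both gr.on(...) blocks: trigger_mode="multiple" queues every press of the button instead of dropping repeats, concurrency_limit=5 caps how many generation jobs run at once, and .then(...) chains save_gallery_images to run after each generation completes. A minimal runnable sketch of this Gradio event-chaining pattern; generate and save_images are hypothetical stand-ins for the app's infer_multi and save_gallery_images:

import gradio as gr

def generate(prompt: str):
    return []  # stand-in for the real inference handler

def save_images(gallery):
    return gallery, []  # stand-in post-processing, runs after generate finishes

with gr.Blocks() as demo:
    prompt = gr.Textbox()
    run = gr.Button("Run")
    results = gr.Gallery()
    files = gr.Files()
    gr.on(
        triggers=[run.click, prompt.submit],
        fn=generate,
        inputs=[prompt],
        outputs=[results],
        queue=True,
        trigger_mode="multiple",  # queue repeated clicks instead of ignoring them
        concurrency_limit=5,      # at most five of these jobs run concurrently
    ).then(save_images, [results], [results, files], queue=False)

demo.launch()

Because the chained step runs with queue=False and show_api=False, it bypasses the queue and stays off the public API surface, firing as soon as the generation step produces its final result.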
multit2i.py
CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 import asyncio
-import queue
 from threading import RLock
 from pathlib import Path
 
@@ -108,6 +107,8 @@ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
     return gr.update(value=output_images), gr.update(value=output_paths)
 
 
+# https://github.com/gradio-app/gradio/blob/main/gradio/external.py
+# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
 def load_from_model(model_name: str, hf_token: str = None):
     import httpx
     import huggingface_hub
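The two links added above this hunk document what load_from_model builds on: Gradio's external-model loader and the huggingface_hub InferenceClient behind it. A minimal sketch of the underlying call, assuming the model is served by the HF Inference API (the model id here is only an example):

from huggingface_hub import InferenceClient

client = InferenceClient(model="stabilityai/stable-diffusion-2-1")

# text_to_image returns a PIL.Image; negative_prompt rides along the same
# way the @@ -307 hunk below now forwards neg_prompt to the loaded model.
image = client.text_to_image(
    "1girl, solo, looking at viewer",
    negative_prompt="lowres, bad anatomy",
)
image.save("out.png")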
@@ -167,6 +168,7 @@ async def async_load_models(models: list, limit: int=5):
     async def async_load_model(model: str):
         async with sem:
             try:
+                await asyncio.sleep(0.5)
                 return await asyncio.to_thread(load_model, model)
             except Exception as e:
                 print(e)
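The added await asyncio.sleep(0.5) staggers requests inside the semaphore that async_load_models already creates from its limit argument, so a burst of model loads doesn't hit the Hub all at once. A self-contained sketch of the same throttling pattern (load_model here is a stub):

import asyncio
import time

def load_model(name: str) -> str:
    time.sleep(0.1)  # stand-in for the blocking model-loading call
    return name

async def load_all(models: list, limit: int = 5) -> list:
    sem = asyncio.Semaphore(limit)  # at most `limit` loads in flight

    async def load_one(model: str):
        async with sem:
            await asyncio.sleep(0.5)  # spread requests out, as in the patch
            return await asyncio.to_thread(load_model, model)

    return await asyncio.gather(*(load_one(m) for m in models))

print(asyncio.run(load_all([f"model-{i}" for i in range(8)], limit=3)))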
@@ -307,7 +309,7 @@ def infer(prompt: str, neg_prompt: str, model_name: str):
     try:
         model = load_model(model_name)
         if not model: return (Image.Image(), None)
-        image_path = model(prompt + seed)
+        image_path = model(prompt + seed, neg_prompt)
         image = Image.open(image_path).convert('RGBA')
     except Exception as e:
         print(e)
@@ -317,35 +319,49 @@
 
 async def infer_multi(prompt: str, neg_prompt: str, results: list, image_num: float, model_name: str,
                       pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], progress=gr.Progress(track_tqdm=True)):
-
+    import asyncio
+    progress(0, desc="Start inference.")
     image_num = int(image_num)
     images = results if results else []
+    image_num_offset = len(images)
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
     tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for i in range(image_num)]
-
-
-
-
+    for task in tasks:
+        progress(float(len(images) - image_num_offset) / float(image_num), desc="Running inference.")
+        try:
+            result = await task
+        except Exception as e:
+            print(e)
+            task.cancel()
+            result = None
+        image_num_offset += 1
         with lock:
-            if result and result[1]: images.append(result)
+            if result and len(result) == 2 and result[1]: images.append(result)
+        await asyncio.sleep(0.05)
         yield images
 
 
 async def infer_multi_random(prompt: str, neg_prompt: str, results: list, image_num: float,
                              pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], progress=gr.Progress(track_tqdm=True)):
-    from tqdm.asyncio import tqdm_asyncio
     import random
+    progress(0, desc="Start inference.")
     image_num = int(image_num)
     images = results if results else []
+    image_num_offset = len(images)
     random.seed()
     model_names = random.choices(list(loaded_models.keys()), k = image_num)
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
     tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for model_name in model_names]
-
-
-
-
+    for task in tasks:
+        progress(float(len(images) - image_num_offset) / float(image_num), desc="Running inference.")
+        try:
+            result = await task
+        except Exception as e:
+            print(e)
+            task.cancel()
+            result = None
+        image_num_offset += 1
         with lock:
-            if result and result[1]: images.append(result)
+            if result and len(result) == 2 and result[1]: images.append(result)
+        await asyncio.sleep(0.05)
         yield images
-
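The rewritten loops in infer_multi and infer_multi_random walk the pending calls one by one, update gr.Progress manually, and yield partial results so the gallery fills in as images arrive. One caveat: asyncio.to_thread(...) only returns a coroutine, so the work does not start until each one is awaited (the images render sequentially), and coroutine objects have no cancel() method, which exists only on asyncio.Task. A runnable sketch of the same progress-streaming loop using asyncio.create_task, which starts every job immediately and makes cancel() a valid call; fake_infer is a stub for the real per-image inference:

import asyncio
import time

def fake_infer(model_name: str) -> tuple:
    time.sleep(0.2)  # stand-in for the blocking inference call
    return (f"{model_name}-image", f"/tmp/{model_name}.png")

async def infer_many(model_names: list):
    # Wrap each blocking call in a Task so all of them start right away.
    tasks = [asyncio.create_task(asyncio.to_thread(fake_infer, m)) for m in model_names]
    images = []
    done = 0
    for task in tasks:
        try:
            result = await task  # collect results in submission order
        except Exception as e:
            print(e)
            task.cancel()  # no-op on a finished Task, valid unlike on a bare coroutine
            result = None
        done += 1
        print(f"progress: {done}/{len(tasks)}")
        if result and len(result) == 2 and result[1]:
            images.append(result)
        yield images  # stream partial results, as the Gradio handler does

async def main():
    async for images in infer_many(["model-a", "model-b", "model-c"]):
        print(len(images), "image(s) so far")

asyncio.run(main())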