import gradio as gr
import asyncio
from threading import RLock
from pathlib import Path

lock = RLock()
loaded_models = {}
model_info_dict = {}


def to_list(s):
    """Split a comma-separated string into a list of stripped tokens."""
    return [x.strip() for x in s.split(",")]


def list_sub(a, b):
    """Return the elements of a that are not in b."""
    return [e for e in a if e not in b]


def list_uniq(l):
    """Deduplicate a list while preserving first-occurrence order."""
    return sorted(set(l), key=l.index)


def is_repo_name(s):
    """Return a match object if s looks like a Hugging Face 'user/repo' id, else None."""
    import re
    return re.fullmatch(r'[^/]+/[^/]+', s)
def find_model_list(author: str = "", tags: list[str] = [], not_tag="", sort: str = "last_modified", limit: int = 30):
    """Search the Hub for public text-to-image diffusers models and return up to `limit` repo ids."""
    from huggingface_hub import HfApi
    api = HfApi()
    default_tags = ["diffusers"]
    if not sort: sort = "last_modified"
    models = []
    try:
        # Over-fetch (limit * 5) because private, gated, and excluded models are filtered out below.
        model_infos = api.list_models(author=author, pipeline_tag="text-to-image",
                                      tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit * 5)
    except Exception as e:
        print("Error: Failed to list models.")
        print(e)
        return models
    for model in model_infos:
        if not model.private and not model.gated:
            if not_tag and not_tag in model.tags: continue
            models.append(model.id)
            if len(models) == limit: break
    return models
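# Usage sketch for find_model_list (the author and tag values below are only
# hypothetical examples, not taken from this Space's configuration):
# models = find_model_list(author="some-author", tags=["anime"], not_tag="flux",
#                          sort="downloads", limit=10)
# print(models)  # up to 10 repo ids, e.g. ['some-author/model-a', ...]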
def get_t2i_model_info_dict(repo_id: str):
    """Collect version, tags, downloads, likes, and a Markdown summary for a model repo."""
    from huggingface_hub import HfApi
    api = HfApi()
    info = {"md": "None"}
    try:
        if not is_repo_name(repo_id) or not api.repo_exists(repo_id=repo_id): return info
        model = api.model_info(repo_id=repo_id)
    except Exception as e:
        print(f"Error: Failed to get {repo_id}'s info.")
        print(e)
        return info
    if model.private or model.gated: return info
    try:
        tags = model.tags
    except Exception as e:
        print(e)
        return info
    if 'diffusers' not in tags: return info
    # Map the diffusers pipeline tag to a human-readable base-model version.
    if 'diffusers:StableDiffusionXLPipeline' in tags: info["ver"] = "SDXL"
    elif 'diffusers:StableDiffusionPipeline' in tags: info["ver"] = "SD1.5"
    elif 'diffusers:StableDiffusion3Pipeline' in tags: info["ver"] = "SD3"
    else: info["ver"] = "Other"
    info["url"] = f"https://huggingface.co/{repo_id}/"
    info["tags"] = model.card_data.tags if model.card_data and model.card_data.tags else []
    info["downloads"] = model.downloads
    info["likes"] = model.likes
    info["last_modified"] = model.last_modified.strftime("lastmod: %Y-%m-%d")
    # Generic tags that would clutter the summary line.
    un_tags = ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']
    descs = [info["ver"]] + list_sub(info["tags"], un_tags) + [f'DLs: {info["downloads"]}'] + [f'❤: {info["likes"]}'] + [info["last_modified"]]
    info["md"] = f'Model Info: {", ".join(descs)} [Model Repo]({info["url"]})'
    return info
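# The "md" entry is a one-line Markdown summary for display in the UI. A quick
# check (requires network access; the repo id is just a well-known public example):
# info = get_t2i_model_info_dict("stabilityai/stable-diffusion-xl-base-1.0")
# print(info["md"])  # 'Model Info: SDXL, ..., DLs: ..., ❤: ..., lastmod: ... [Model Repo](...)'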
def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
    """Rename gradio's temporary gallery files to timestamped names; return updated gallery and paths."""
    from datetime import datetime, timezone, timedelta
    progress(0, desc="Updating gallery...")
    dt_now = datetime.now(timezone(timedelta(hours=9)))  # JST timestamps
    basename = dt_now.strftime('%Y%m%d_%H%M%S_')
    if not images: return images, images  # both outputs (gallery, paths) expect a value
    output_images = []
    output_paths = []
    for i, image in enumerate(images, 1):
        filename = f'{image[1]}_{basename}{i}.png'
        oldpath = Path(image[0])
        newpath = oldpath
        try:
            # gradio saves temporary files as "image.png"; only those get renamed.
            if oldpath.stem == "image" and oldpath.exists():
                newpath = oldpath.resolve().rename(Path(filename).resolve())
        except Exception as e:
            print(e)
        finally:
            output_paths.append(str(newpath))
            output_images.append((str(newpath), str(filename)))
    progress(1, desc="Gallery updated.")
    return gr.update(value=output_images), gr.update(value=output_paths)
# https://github.com/gradio-app/gradio/blob/main/gradio/external.py
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
def load_from_model(model_name: str, hf_token: str | None = None):
    """Build a gr.Interface whose fn calls the serverless Inference API for model_name."""
    import httpx
    import huggingface_hub
    from gradio.exceptions import ModelNotFoundError
    model_url = f"https://huggingface.co/{model_name}"
    api_url = f"https://api-inference.huggingface.co/models/{model_name}"
    print(f"Fetching model from: {model_url}")
    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
    response = httpx.request("GET", api_url, headers=headers)
    if response.status_code != 200:
        raise ModelNotFoundError(
            f"Could not find model: {model_name}. If it is a private or gated model, "
            "please provide your Hugging Face access token "
            "(https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
        )
    headers["X-Wait-For-Model"] = "true"  # block until the model is loaded on the inference backend
    client = huggingface_hub.InferenceClient(model=model_name, headers=headers, token=hf_token)
    inputs = gr.components.Textbox(label="Input")
    outputs = gr.components.Image(label="Output")
    fn = client.text_to_image

    def query_huggingface_inference_endpoints(*data):
        return fn(*data)

    interface_info = {
        "fn": query_huggingface_inference_endpoints,
        "inputs": inputs,
        "outputs": outputs,
        "title": model_name,
    }
    return gr.Interface(**interface_info)
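# Direct-use sketch: interfaces built this way are used as callables elsewhere in
# this file (see infer() below), returning a filepath to the generated image.
# Performs a network call; the repo id is just a public example:
# iface = load_from_model("stabilityai/stable-diffusion-xl-base-1.0")
# path = iface("1girl, looking at viewer")
# print(path)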
def load_model(model_name: str):
    """Load (or return a cached) inference interface and cache the model's info."""
    global loaded_models
    global model_info_dict
    if model_name in loaded_models: return loaded_models[model_name]
    try:
        loaded_models[model_name] = load_from_model(model_name)
        print(f"Loaded: {model_name}")
    except Exception as e:
        if model_name in loaded_models: del loaded_models[model_name]
        print(f"Failed to load: {model_name}")
        print(e)
        return None
    try:
        model_info_dict[model_name] = get_t2i_model_info_dict(model_name)
        print(f"Assigned: {model_name}")
    except Exception as e:
        if model_name in model_info_dict: del model_info_dict[model_name]
        print(f"Failed to assign: {model_name}")
        print(e)
    return loaded_models[model_name]
async def async_load_models(models: list, limit: int = 5):
    """Load models concurrently, at most `limit` at a time."""
    sem = asyncio.Semaphore(limit)
    async def async_load_model(model: str):
        async with sem:
            try:
                await asyncio.sleep(0.5)  # stagger requests a little
                return await asyncio.to_thread(load_model, model)
            except Exception as e:
                print(e)
    tasks = [asyncio.create_task(async_load_model(model)) for model in models]
    return await asyncio.gather(*tasks, return_exceptions=True)


def load_models(models: list, limit: int = 5):
    """Synchronous wrapper around async_load_models, for use at startup."""
    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(async_load_models(models, limit))
    except Exception as e:
        print(e)
    finally:
        loop.close()
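# Startup sketch: discover repos, then pre-load them up to 5 at a time
# (the author name is hypothetical):
# models = find_model_list(author="some-author", limit=10)
# load_models(models, limit=5)
# print(list(loaded_models.keys()))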
positive_prefix = {
    "Pony": to_list("score_9, score_8_up, score_7_up"),
    "Pony Anime": to_list("source_anime, anime, score_9, score_8_up, score_7_up"),
}
positive_suffix = {
    "Common": to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres"),
    "Anime": to_list("anime artwork, anime style, studio anime, highly detailed"),
}
negative_prefix = {
    "Pony": to_list("score_6, score_5, score_4"),
    "Pony Anime": to_list("score_6, score_5, score_4, source_pony, source_furry, source_cartoon"),
    "Pony Real": to_list("score_6, score_5, score_4, source_anime, source_pony, source_furry, source_cartoon"),
}
negative_suffix = {
    "Common": to_list("lowres, (bad), bad hands, bad feet, text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"),
    "Pony Anime": to_list("busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends"),
    "Pony Real": to_list("ugly, airbrushed, simple background, cgi, cartoon, anime"),
}
# Flat lists of every known quality tag, in both underscore and space form,
# used by recom_prompt() to strip previously inserted tags from user input.
positive_all = []
negative_all = []
for v in (positive_prefix | positive_suffix).values():
    positive_all = positive_all + v + [s.replace("_", " ") for s in v]
positive_all = list_uniq(positive_all)
for v in (negative_prefix | negative_suffix).values():
    negative_all = negative_all + v + [s.replace("_", " ") for s in v]
negative_all = list_uniq(negative_all)
def recom_prompt(prompt: str = "", neg_prompt: str = "", pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = []):
    """Strip known quality tags from the prompts, then re-insert the selected prefixes and suffixes."""
    def flatten(src):
        return [item for row in src for item in row]
    prompts = to_list(prompt)
    neg_prompts = to_list(neg_prompt)
    prompts = list_sub(prompts, positive_all)
    neg_prompts = list_sub(neg_prompts, negative_all)
    # Keep a trailing empty token when the user prompt is empty so the join still ends with ", ".
    last_empty_p = [""] if not prompts else []
    last_empty_np = [""] if not neg_prompts else []
    prefix_ps = flatten([positive_prefix.get(s, []) for s in pos_pre])
    suffix_ps = flatten([positive_suffix.get(s, []) for s in pos_suf])
    prefix_nps = flatten([negative_prefix.get(s, []) for s in neg_pre])
    suffix_nps = flatten([negative_suffix.get(s, []) for s in neg_suf])
    prompt = ", ".join(list_uniq(prefix_ps + prompts + suffix_ps) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(prefix_nps + neg_prompts + suffix_nps) + last_empty_np)
    return prompt, neg_prompt
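# recom_prompt is a pure function over the dictionaries above, so it is easy to
# sanity-check offline:
# p, np = recom_prompt("1girl, solo", "lowres", pos_pre=["Pony"], pos_suf=["Common"],
#                      neg_pre=["Pony"], neg_suf=["Common"])
# p starts with "score_9, score_8_up, score_7_up" and ends with the Common quality
# tags; np starts with "score_6, score_5, score_4".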
recom_prompt_type = {
    "None": ([], [], [], []),
    "Auto": ([], [], [], []),
    "Common": ([], ["Common"], [], ["Common"]),
    "Animagine": ([], ["Common", "Anime"], [], ["Common"]),
    "Pony": (["Pony"], ["Common"], ["Pony"], ["Common"]),
    "Pony Anime": (["Pony", "Pony Anime"], ["Common", "Anime"], ["Pony", "Pony Anime"], ["Common", "Pony Anime"]),
    "Pony Real": (["Pony"], ["Common"], ["Pony", "Pony Real"], ["Common", "Pony Real"]),
}
enable_auto_recom_prompt = False


def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
    """Apply the preset named by `type`; "Auto" also enables automatic tagging globally."""
    global enable_auto_recom_prompt
    enable_auto_recom_prompt = (type == "Auto")
    pos_pre, pos_suf, neg_pre, neg_suf = recom_prompt_type.get(type, ([], [], [], []))
    return recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)


def set_recom_prompt_preset(type: str = "None"):
    pos_pre, pos_suf, neg_pre, neg_suf = recom_prompt_type.get(type, ([], [], [], []))
    return pos_pre, pos_suf, neg_pre, neg_suf


def get_recom_prompt_type():
    types = list(recom_prompt_type.keys())
    types.remove("Auto")  # "Auto" is handled separately by insert_recom_prompt()
    return types
def get_positive_prefix():
    return list(positive_prefix.keys())


def get_positive_suffix():
    return list(positive_suffix.keys())


def get_negative_prefix():
    return list(negative_prefix.keys())


def get_negative_suffix():
    return list(negative_suffix.keys())
def get_tag_type(pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = []):
    """Pony presets imply e621-style tags; everything else uses danbooru-style tags."""
    tag_type = "danbooru"
    words = pos_pre + pos_suf + neg_pre + neg_suf
    for word in words:
        if "Pony" in word:
            tag_type = "e621"
            break
    return tag_type
def get_model_info_md(model_name: str):
    if model_name in model_info_dict: return model_info_dict[model_name].get("md", "")
    return ""


def change_model(model_name: str):
    load_model(model_name)
    return get_model_info_md(model_name)
def infer(prompt: str, neg_prompt: str, model_name: str):
    """Run one text-to-image request and return an (image, caption) pair for the gallery."""
    from PIL import Image
    import random
    # Pad the prompt with a random number of trailing spaces so identical prompts
    # are not answered from the inference cache.
    seed = " " * random.randint(1, 500)
    caption = model_name.split("/")[-1]
    try:
        model = load_model(model_name)
        if not model: return (Image.Image(), None)
        image_path = model(prompt + seed, neg_prompt)
        image = Image.open(image_path).convert('RGBA')
    except Exception as e:
        print(e)
        return (Image.Image(), None)
    return (image, caption)
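# Single-shot sketch (performs a network inference call; the repo id is only an example):
# image, caption = infer("1girl, looking at viewer", "lowres, bad hands",
#                        "stabilityai/stable-diffusion-xl-base-1.0")
# if caption: image.save(f"{caption}.png")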
async def infer_multi(prompt: str, neg_prompt: str, results: list, image_num: float, model_name: str,
                      pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [],
                      progress=gr.Progress(track_tqdm=True)):
    """Generate image_num images with one model, yielding the growing gallery after each one."""
    progress(0, desc="Start inference.")
    image_num = int(image_num)
    images = results if results else []
    prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
    # asyncio.to_thread coroutines only start when awaited, so requests run one at a time.
    tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for _ in range(image_num)]
    for n, task in enumerate(tasks):
        progress(n / image_num, desc="Running inference.")
        try:
            result = await task
        except Exception as e:
            print(e)
            result = None
        with lock:
            if result and len(result) == 2 and result[1]: images.append(result)
        await asyncio.sleep(0.05)
        yield images
async def infer_multi_random(prompt: str, neg_prompt: str, results: list, image_num: float,
                             pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [],
                             progress=gr.Progress(track_tqdm=True)):
    """Like infer_multi, but picks a random loaded model for each image."""
    import random
    progress(0, desc="Start inference.")
    image_num = int(image_num)
    images = results if results else []
    random.seed()
    # Choose image_num models at random (with replacement) from the loaded ones.
    model_names = random.choices(list(loaded_models.keys()), k=image_num)
    prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
    tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for model_name in model_names]
    for n, task in enumerate(tasks):
        progress(n / image_num, desc="Running inference.")
        try:
            result = await task
        except Exception as e:
            print(e)
            result = None
        with lock:
            if result and len(result) == 2 and result[1]: images.append(result)
        await asyncio.sleep(0.05)
        yield images
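# Minimal UI wiring sketch. The component names (`demo`, `gallery`, etc.) are
# hypothetical; the real Space builds a richer layout around these functions:
# with gr.Blocks() as demo:
#     prompt = gr.Textbox(label="Prompt")
#     neg_prompt = gr.Textbox(label="Negative prompt")
#     image_num = gr.Slider(1, 8, value=4, step=1, label="Number of images")
#     model_name = gr.Dropdown(choices=list(loaded_models.keys()), label="Model")
#     gallery = gr.Gallery(label="Output")
#     run = gr.Button("Generate")
#     run.click(infer_multi, [prompt, neg_prompt, gallery, image_num, model_name], [gallery])
# demo.queue().launch()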