Upload 10 files
README.md CHANGED

@@ -4,7 +4,7 @@ emoji:
 colorFrom: indigo
 colorTo: pink
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.13.1
 app_file: app.py
 pinned: false
 ---
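In a Space's README front matter, sdk_version pins the Gradio release the Space is built with; this commit moves the runtime to Gradio 5.13.1.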
hft2i.py CHANGED
@@ -212,31 +212,29 @@ def load_model(model_name: str):
 def load_model_api(model_name: str):
     global loaded_models
     global model_info_dict
-    if model_name in loaded_models.keys(): return loaded_models[model_name]
     try:
-
-
-
+        loaded = False
+        client = InferenceClient(timeout=5, token=HF_TOKEN)
+        status = client.get_model_status(model_name)
+        if status is None or status.framework != "diffusers" or not status.loaded or status.state not in ["Loadable", "Loaded"]:
             print(f"Failed to load by API: {model_name}")
-            return None
         else:
-
-
-            loaded_models[model_name] = InferenceClient(model_name, timeout=server_timeout, **kwargs)
+            loaded_models[model_name] = InferenceClient(model_name, timeout=server_timeout)
+            loaded = True
             print(f"Loaded by API: {model_name}")
     except Exception as e:
-        if model_name in loaded_models.keys(): del loaded_models[model_name]
         print(f"Failed to load by API: {model_name}")
         print(e)
-
+        loaded = False
     try:
-
-
+        if loaded:
+            model_info_dict[model_name] = get_t2i_model_info_dict(model_name)
+            print(f"Assigned by API: {model_name}")
     except Exception as e:
         if model_name in model_info_dict.keys(): del model_info_dict[model_name]
         print(f"Failed to assigned by API: {model_name}")
         print(e)
-    return loaded_models[model_name]
+    return loaded_models[model_name] if model_name in loaded_models.keys() else None
 
 
 def load_models(models: list):
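The rewritten load_model_api first probes the serverless Inference API with InferenceClient.get_model_status before creating a per-model client. A standalone sketch of that probe, assuming HF_TOKEN comes from the environment (the helper name is hypothetical; get_model_status and the ModelStatus fields are real huggingface_hub API):

import os
from huggingface_hub import InferenceClient

HF_TOKEN = os.environ.get("HF_TOKEN")  # assumption: optional token from the environment

def is_servable_t2i(model_name: str) -> bool:  # hypothetical helper name
    # Mirror the check in the diff: only accept diffusers models that the
    # serverless API reports as loaded and in a servable state.
    try:
        status = InferenceClient(timeout=5, token=HF_TOKEN).get_model_status(model_name)
    except Exception as e:
        print(e)
        return False
    return (status is not None and status.framework == "diffusers"
            and status.loaded and status.state in ["Loadable", "Loaded"])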
@@ -358,6 +356,11 @@ def warm_model(model_name: str):
         print(e)
 
 
+def warm_models(models: list[str]):
+    for model in models:
+        asyncio.new_event_loop().run_in_executor(None, warm_model, model)
+
+
 # https://huggingface.co/docs/api-inference/detailed_parameters
 # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
 def infer_body(client: InferenceClient | gr.Interface | object, model_str: str, prompt: str, neg_prompt: str = "",
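The added warm_models is fire-and-forget: loop.run_in_executor submits warm_model to a thread pool as soon as it is called, so the warm-ups run even though the freshly created event loop is never started (the loop object is simply dropped). A sketch of the same behavior with plain threads, assuming warm_model is the blocking helper named in the hunk header:

import threading

def warm_models_threaded(models: list[str]) -> None:  # hypothetical alternative, not the Space's code
    for model in models:
        # Daemon threads so unfinished warm-ups never block interpreter shutdown.
        threading.Thread(target=warm_model, args=(model,), daemon=True).start()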
@@ -370,24 +373,24 @@ def infer_body(client: InferenceClient | gr.Interface | object, model_str: str,
     if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
     if seed == -1: kwargs["seed"] = randomize_seed()
     else: kwargs["seed"] = seed
-    if HF_TOKEN is not None: kwargs["token"] = HF_TOKEN
     try:
         if isinstance(client, InferenceClient):
             image = client.text_to_image(prompt=prompt, negative_prompt=neg_prompt, **kwargs)
         elif isinstance(client, gr.Interface):
+            if HF_TOKEN is not None: kwargs["token"] = HF_TOKEN
             image = client.fn(prompt=prompt, negative_prompt=neg_prompt, **kwargs)
         else: return None
         if isinstance(image, tuple): return None
         return save_image(image, png_path, model_str, prompt, neg_prompt, height, width, steps, cfg, seed)
     except Exception as e:
         print(e)
-        raise Exception() from e
+        raise Exception(e) from e
 
 
 async def infer(model_name: str, prompt: str, neg_prompt: str ="", height: int = 0, width: int = 0,
                 steps: int = 0, cfg: int = 0, seed: int = -1,
                 save_path: str | None = None, timeout: float = inference_timeout):
-    model =
+    model = load_model_api(model_name)
     if not model: return None
     task = asyncio.create_task(asyncio.to_thread(infer_body, model, model_name, prompt, neg_prompt,
                                                  height, width, steps, cfg, seed))
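infer now obtains its client through load_model_api and pushes the blocking infer_body into a worker thread with asyncio.to_thread. A minimal sketch of that run-with-deadline pattern (blocking_infer, infer_with_timeout, and the 300-second default are placeholders, not the Space's code):

import asyncio

def blocking_infer(prompt: str) -> str:
    ...  # stands in for a blocking call like infer_body

async def infer_with_timeout(prompt: str, timeout: float = 300.0) -> str | None:
    task = asyncio.create_task(asyncio.to_thread(blocking_infer, prompt))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except Exception as e:  # asyncio.TimeoutError also lands here
        print(e)
        if not task.done(): task.cancel()
        return None

Cancelling the task only abandons the await; the thread started by to_thread runs to completion in the background, which is why the surrounding code still checks task.done() before touching the result.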
@@ -404,7 +407,7 @@ async def infer(model_name: str, prompt: str, neg_prompt: str ="", height: int =
         print(e)
         if not task.done(): task.cancel()
         result = None
-        raise Exception() from e
+        raise Exception(e) from e
     if task.done() and result is not None:
         with lock:
             image = rename_image(result, model_name, save_path)
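Both raise sites change from raise Exception() from e to raise Exception(e) from e. The from clause already chains the traceback through __cause__; passing e additionally makes str() of the wrapper carry the original message:

try:
    1 / 0
except Exception as e:
    wrapped = Exception(e)  # str(wrapped) == "division by zero"
    raise wrapped from e    # __cause__ still holds the ZeroDivisionError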
@@ -511,11 +514,6 @@ def get_def_model(i: int):
     else: return ""
 
 
-def warm_models(models: list[str]):
-    for model in models:
-        asyncio.new_event_loop().run_in_executor(None, warm_model, model)
-
-
 def gen_image(model_name: str, prompt: str, neg_prompt: str = "", height: int = 0, width: int = 0,
               steps: int = 0, cfg: int = 0, seed: int = -1, recom_mode = "None"):
     if model_name in ["NA", ""]: return gr.update()
model.py CHANGED
@@ -1,7 +1,8 @@
 from hft2i import find_model_list
 
 
-
+models_animagine4 = ["cagliostrolab/animagine-xl-4.0"]
+models_animagine = ["cagliostrolab/animagine-xl-4.0", "cagliostrolab/animagine-xl-3.1", "yodayo-ai/kivotos-xl-2.0", "yodayo-ai/holodayo-xl-2.1", "Raelina/Rae-Diffusion-XL-V2", "Raelina/Raemu-XL-V4", "kayfahaarukku/UrangDiffusion-2.0"]
 models_noob = ["John6666/noobai-xl-nai-xl-epsilonpred11version-sdxl"]
 models_ill = ["Raelina/Raehoshi-illust-XL-3", "John6666/illustrious-xl-early-release-v0-sdxl"]
 
@@ -9,8 +10,6 @@ models_ill = ["Raelina/Raehoshi-illust-XL-3", "John6666/illustrious-xl-early-rel
 models = [
     "yodayo-ai/clandestine-xl-1.0",
     "yodayo-ai/kivotos-xl-2.0",
-    "yodayo-ai/holodayo-xl-2.1",
-    "cagliostrolab/animagine-xl-3.1",
     "votepurchase/ponyDiffusionV6XL",
     "eienmojiki/Anything-XL",
     "eienmojiki/Starry-XL-v5.2",
@@ -21,10 +20,8 @@ models = [
     "Meina/MeinaMix_V11",
     "KBlueLeaf/Kohaku-XL-Epsilon-rev3",
     "KBlueLeaf/Kohaku-XL-Zeta",
-    "kayfahaarukku/UrangDiffusion-1.4",
     "Eugeoter/artiwaifu-diffusion-2.0",
-
-    "Raelina/Raemu-XL-V4",
+
 ]
 
 
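model.py keeps its curated lists by hand but imports find_model_list from hft2i, whose body is not part of this diff. A plausible minimal sketch, assuming it wraps HfApi.list_models (everything below except the imported name is an assumption):

from huggingface_hub import HfApi

def find_model_list(author: str, sort: str = "last_modified", limit: int = 30) -> list[str]:
    # Hypothetical reconstruction: an author's text-to-image repos, sorted by last modification.
    models = HfApi().list_models(author=author, task="text-to-image", sort=sort, limit=limit)
    return [m.id for m in models]

A curated list could then be extended with, e.g., models + find_model_list("John6666").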