John6666 committed on
Commit d518d04 · verified · 1 Parent(s): 0ad387a

Upload 17 files

Files changed (9)
  1. README.md +1 -1
  2. app.py +219 -118
  3. constants.py +49 -23
  4. env.py +1 -0
  5. modutils.py +6 -6
  6. packages.txt +1 -1
  7. requirements.txt +22 -20
  8. tagger/tagger.py +1 -1
  9. tagger/v2.py +4 -5
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🧩🖼️📦
 colorFrom: red
 colorTo: pink
 sdk: gradio
-sdk_version: 4.43.0
+sdk_version: 5.44.1
 app_file: app.py
 pinned: true
 header: mini
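Note: the sdk_version bump moves the Space from Gradio 4.43.0 to 5.44.1, which is why app.py below rewrites its image download links to the /gradio_api/file= route and passes allowed_paths to launch(). A minimal, hypothetical sketch of that serving setup (the directory name is illustrative, not the Space's actual launch code):

    import os
    import gradio as gr

    img_path = os.path.abspath("./images/")   # directory the app writes results into

    with gr.Blocks() as demo:
        gr.Markdown("Files under ./images/ can be linked as /gradio_api/file=<absolute path>.")

    # Whitelisting the directory is required for such links to resolve.
    demo.launch(allowed_paths=[img_path])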
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import spaces
2
  import os
 
3
  from stablepy import (
4
  Model_Diffusers,
5
  SCHEDULE_TYPE_OPTIONS,
@@ -23,6 +24,7 @@ from constants import (
23
  DIFFUSERS_CONTROLNET_MODEL,
24
  IP_MODELS,
25
  MODE_IP_OPTIONS,
 
26
  )
27
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
28
  import torch
@@ -41,6 +43,7 @@ from utils import (
41
  progress_step_bar,
42
  html_template_message,
43
  escape_html,
 
44
  )
45
  from image_processor import preprocessor_tab
46
  from datetime import datetime
@@ -53,14 +56,17 @@ from diffusers import FluxPipeline
53
  # import urllib.parse
54
  import subprocess
55
 
56
- IS_ZEROGPU = os.getenv("SPACES_ZERO_GPU", False)
57
- if IS_ZEROGPU:
58
  subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
59
- torch.backends.cuda.matmul.allow_tf32 = True
60
- ImageFile.LOAD_TRUNCATED_IMAGES = True
 
 
61
 
 
 
62
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
63
- print(os.getenv("SPACES_ZERO_GPU"))
64
 
65
  ## BEGIN MOD
66
  from modutils import (list_uniq, download_private_repo, get_model_id_list, get_tupled_embed_list,
@@ -94,14 +100,11 @@ DOWNLOAD_LORA = ", ".join(DOWNLOAD_LORA_LIST)
94
 
95
  # Download stuffs
96
  for url in [url.strip() for url in DOWNLOAD_MODEL.split(',')]:
97
- if not os.path.exists(f"./models/{url.split('/')[-1]}"):
98
- download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
99
  for url in [url.strip() for url in DOWNLOAD_VAE.split(',')]:
100
- if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
101
- download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
102
  for url in [url.strip() for url in DOWNLOAD_LORA.split(',')]:
103
- if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
104
- download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
105
 
106
  # Download Embeddings
107
  for url_embed in DOWNLOAD_EMBEDS:
@@ -128,15 +131,16 @@ def get_embed_list(pipeline_name):
128
 
129
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
130
 
131
- flux_repo = "camenduru/FLUX.1-dev-diffusers"
132
- flux_pipe = FluxPipeline.from_pretrained(
133
- flux_repo,
134
- transformer=None,
135
- torch_dtype=torch.bfloat16,
136
- )#.to("cuda")
137
- components = flux_pipe.components
138
- delete_model(flux_repo)
139
- # components = None
 
140
 
141
  #######################
142
  # GUI
@@ -146,10 +150,19 @@ diffusers.utils.logging.set_verbosity(40)
146
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
147
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
148
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
149
- ## BEGIN MOD
150
- #logger.setLevel(logging.CRITICAL)
151
- logger.setLevel(logging.DEBUG)
152
 
153
  from tagger.v2 import V2_ALL_MODELS, v2_random_prompt, v2_upsampling_prompt
154
  from tagger.utils import (gradio_copy_text, COPY_ACTION_JS, gradio_copy_prompt,
155
  V2_ASPECT_RATIO_OPTIONS, V2_RATING_OPTIONS, V2_LENGTH_OPTIONS, V2_IDENTITY_OPTIONS)
@@ -167,6 +180,11 @@ def description_ui():
167
  )
168
  ## END MOD
169
 
170
  class GuiSD:
171
  def __init__(self, stream=True):
172
  self.model = None
@@ -182,6 +200,15 @@ class GuiSD:
182
  removal_candidate = self.inventory.pop(0)
183
  delete_model(removal_candidate)
184
 
185
  def update_inventory(self, model_name):
186
  if model_name not in single_file_model_list:
187
  self.inventory = [
@@ -192,19 +219,24 @@ class GuiSD:
192
  def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
193
 
194
  # download link model > model_name
195
- if "http" in model_name: #
 
 
196
  model_name, model_type = download_link_model(model_name, DIRECTORY_MODELS) #
 
 
197
  is_link_model = True #
198
  else: is_link_model = False #
199
 
200
- self.update_storage_models()
 
201
 
202
  vae_model = vae_model if vae_model != "None" else None
203
  model_type = get_model_type(model_name) if not is_link_model else model_type #
204
  dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
205
 
206
  if not os.path.exists(model_name):
207
- print("debug", model_name, vae_model, task, controlnet_model)
208
  _ = download_diffuser_repo(
209
  repo_name=model_name,
210
  model_type=model_type,
@@ -249,10 +281,10 @@ class GuiSD:
249
  type_model_precision=dtype_model,
250
  retain_task_model_in_cache=False,
251
  controlnet_model=controlnet_model,
252
- device="cpu",
253
  env_components=components,
254
  )
255
- self.model.advanced_params(image_preprocessor_cuda_active=True)
256
  else:
257
  if self.model.base_model_id != model_name:
258
  load_now_time = datetime.now()
@@ -262,7 +294,8 @@ class GuiSD:
262
  print("Waiting for the previous model's time ops...")
263
  time.sleep(9 - elapsed_time)
264
 
265
- self.model.device = torch.device("cpu")
 
266
  self.model.load_pipe(
267
  model_name,
268
  task_name=TASK_STABLEPY[task],
@@ -425,7 +458,7 @@ class GuiSD:
425
  lora_scale3, lora4, lora_scale4, lora5, lora_scale5, lora6, lora_scale6, lora7, lora_scale7)
426
  ## END MOD
427
 
428
- print("Config model:", model_name, vae_model, loras_list)
429
 
430
  task = TASK_STABLEPY[task]
431
 
@@ -523,19 +556,19 @@ class GuiSD:
523
  "distance_threshold": distance_threshold,
524
  "recolor_gamma_correction": float(recolor_gamma_correction),
525
  "tile_blur_sigma": int(tile_blur_sigma),
526
- "lora_A": lora1 if lora1 != "None" else None,
527
  "lora_scale_A": lora_scale1,
528
- "lora_B": lora2 if lora2 != "None" else None,
529
  "lora_scale_B": lora_scale2,
530
- "lora_C": lora3 if lora3 != "None" else None,
531
  "lora_scale_C": lora_scale3,
532
- "lora_D": lora4 if lora4 != "None" else None,
533
  "lora_scale_D": lora_scale4,
534
- "lora_E": lora5 if lora5 != "None" else None,
535
  "lora_scale_E": lora_scale5,
536
- "lora_F": lora6 if lora6 != "None" else None,
537
  "lora_scale_F": lora_scale6,
538
- "lora_G": lora7 if lora7 != "None" else None,
539
  "lora_scale_G": lora_scale7,
540
  ## BEGIN MOD
541
  "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
@@ -595,11 +628,11 @@ class GuiSD:
595
  # kwargs for diffusers pipeline
596
  if guidance_rescale:
597
  pipe_params["guidance_rescale"] = guidance_rescale
598
-
599
- self.model.device = torch.device("cuda:0")
600
- if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
601
- self.model.pipe.transformer.to(self.model.device)
602
- print("transformer to cuda")
603
 
604
  actual_progress = 0
605
  info_images = gr.update()
@@ -629,7 +662,7 @@ class GuiSD:
629
 
630
  download_links = "<br>".join(
631
  [
632
- f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
633
  for i, path in enumerate(image_path)
634
  ]
635
  )
@@ -741,7 +774,8 @@ def sd_gen_generate_pipeline(*args):
741
 
742
  @spaces.GPU(duration=15)
743
  def process_upscale(image, upscaler_name, upscaler_size):
744
- if image is None: return None
 
745
 
746
  from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
747
  from stablepy import load_upscaler_model
@@ -758,7 +792,7 @@ def process_upscale(image, upscaler_name, upscaler_size):
758
 
759
  name_upscaler = f"./{DIRECTORY_UPSCALERS}/{name_upscaler.split('/')[-1]}"
760
 
761
- scaler_beta = load_upscaler_model(model=name_upscaler, tile=0, tile_overlap=8, device="cuda", half=True)
762
  image_up = scaler_beta.upscale(image, upscaler_size, True)
763
 
764
  image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
@@ -767,8 +801,8 @@ def process_upscale(image, upscaler_name, upscaler_size):
767
 
768
 
769
  # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
770
- dynamic_gpu_duration.zerogpu = True
771
- sd_gen_generate_pipeline.zerogpu = True
772
  sd_gen = GuiSD()
773
 
774
 
@@ -786,7 +820,7 @@ CSS ="""
786
  .desc [src$='#float'] { float: right; margin: 20px; }
787
  """
788
 
789
- with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, css=CSS, delete_cache=(60, 3600)) as app:
790
  gr.Markdown("# 🧩 DiffuseCraft Mod", elem_classes="title")
791
  gr.Markdown("This space is a modification of [r3gm's DiffuseCraft](https://huggingface.co/spaces/r3gm/DiffuseCraft).", elem_classes="info")
792
  with gr.Column():
@@ -912,7 +946,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
912
  with gr.Row():
913
  seed_gui = gr.Number(minimum=-1, maximum=2**32-1, value=-1, label="Seed")
914
  pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
915
- num_images_gui = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Images")
916
  clip_skip_gui = gr.Checkbox(value=False, label="Layer 2 Clip Skip")
917
  free_u_gui = gr.Checkbox(value=False, label="FreeU")
918
  with gr.Row():
@@ -939,7 +973,22 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
939
  "Schedule type": gr.update(value="Automatic"),
940
  "PAG": gr.update(value=.0),
941
  "FreeU": gr.update(value=False),
942
  }
943
  valid_keys = list(valid_receptors.keys())
944
 
945
  parameters = extract_parameters(base_prompt)
@@ -953,6 +1002,36 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
953
  parameters["Sampler"] = value_sampler
954
  parameters["Schedule type"] = s_type
955
 
 
956
  for key, val in parameters.items():
957
  # print(val)
958
  if key in valid_keys:
@@ -960,9 +1039,12 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
960
  if key == "Sampler":
961
  if val not in scheduler_names:
962
  continue
963
- if key == "Schedule type":
964
  if val not in SCHEDULE_TYPE_OPTIONS:
965
- val = "Automatic"
966
  elif key == "Clip skip":
967
  if "," in str(val):
968
  val = val.replace(",", "")
@@ -970,15 +1052,15 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
970
  val = True
971
  if key == "prompt":
972
  if ">" in val and "<" in val:
973
- val = re.sub(r'<[^>]+>', '', val)
974
  print("Removed LoRA written in the prompt")
975
  if key in ["prompt", "neg_prompt"]:
976
  val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
977
- if key in ["Steps", "width", "height", "Seed"]:
978
  val = int(val)
979
  if key == "FreeU":
980
  val = True
981
- if key in ["CFG scale", "PAG"]:
982
  val = float(val)
983
  if key == "Model":
984
  filtered_models = [m for m in model_list if val in m]
@@ -986,8 +1068,12 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
986
  val = filtered_models[0]
987
  else:
988
  val = name_model
989
  if key == "Seed":
990
  continue
 
991
  valid_receptors[key] = gr.update(value=val)
992
  # print(val, type(val))
993
  # print(valid_receptors)
@@ -995,24 +1081,6 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
995
  print(str(e))
996
  return [value for value in valid_receptors.values()]
997
 
998
- set_params_gui.click(
999
- run_set_params_gui, [prompt_gui, model_name_gui], [
1000
- prompt_gui,
1001
- neg_prompt_gui,
1002
- steps_gui,
1003
- img_width_gui,
1004
- img_height_gui,
1005
- seed_gui,
1006
- sampler_gui,
1007
- cfg_gui,
1008
- clip_skip_gui,
1009
- model_name_gui,
1010
- schedule_type_gui,
1011
- pag_scale_gui,
1012
- free_u_gui,
1013
- ],
1014
- )
1015
-
1016
  def run_clear_prompt_gui():
1017
  return gr.update(value=""), gr.update(value="")
1018
  clear_prompt_gui.click(
@@ -1030,8 +1098,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1030
  return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320, visible=visible)
1031
 
1032
  def lora_scale_slider(label, visible=True):
1033
- return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label, visible=visible)
1034
-
 
1035
  def lora_textbox(label):
1036
  return gr.Textbox(label=label, info="Example of prompt:", value="None", show_copy_button=True, interactive=False, visible=False)
1037
 
@@ -1077,16 +1146,16 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1077
  lora5_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1078
  lora5_desc_gui = gr.Markdown(value="", visible=False)
1079
  with gr.Column():
1080
- lora6_gui = lora_dropdown("LoRA6", visible=False)
1081
- lora_scale_6_gui = lora_scale_slider("LoRA Scale 6", visible=False)
1082
  with gr.Row():
1083
  with gr.Group():
1084
  lora6_info_gui = lora_textbox("LoRA6 prompts")
1085
  lora6_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1086
  lora6_desc_gui = gr.Markdown(value="", visible=False)
1087
  with gr.Column():
1088
- lora7_gui = lora_dropdown("LoRA7", visible=False)
1089
- lora_scale_7_gui = lora_scale_slider("LoRA Scale 7", visible=False)
1090
  with gr.Row():
1091
  with gr.Group():
1092
  lora7_info_gui = lora_textbox("LoRA7 prompts")
@@ -1108,7 +1177,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1108
  search_civitai_result_lora = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
1109
  with gr.Row():
1110
  text_lora = gr.Textbox(label="LoRA's download URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1, scale=4)
1111
- romanize_text = gr.Checkbox(value=False, label="Transliterate name", scale=1, visible=False)
1112
  button_lora = gr.Button("Get and Refresh the LoRA Lists")
1113
  new_lora_status = gr.HTML()
1114
  with gr.Accordion("From Local", open=True, visible=True):
@@ -1119,8 +1188,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1119
  with gr.Accordion("Hires fix", open=False, visible=True) as menu_hires:
1120
  upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
1121
  with gr.Row():
1122
- upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.0, label="Upscale by")
1123
- upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=0, label="Upscaler Tile Size", info="0 = no tiling")
1124
  upscaler_tile_overlap_gui = gr.Slider(minimum=0, maximum=48, step=1, value=8, label="Upscaler Tile Overlap")
1125
  with gr.Row():
1126
  hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
@@ -1230,8 +1299,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1230
  )
1231
 
1232
  with gr.Row():
1233
- value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
1234
- distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
1235
  recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
1236
  tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
1237
 
@@ -1296,20 +1365,62 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1296
  hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
1297
  hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
1298
  generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
1299
-
1300
  with gr.Accordion("More settings", open=False, visible=False):
1301
  loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
1302
  retain_task_cache_gui = gr.Checkbox(value=True, label="Retain task model in cache")
1303
- leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
1304
- disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
1305
  display_images_gui = gr.Checkbox(value=False, label="Display Images")
1306
  image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
1307
- image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
1308
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1309
  retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1310
  retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
1311
  xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
1312
 
1313
  with gr.Accordion("Examples and help", open=True, visible=True) as menu_example:
1314
  gr.Examples(
1315
  examples=EXAMPLES_GUI,
@@ -1337,36 +1448,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1337
 
1338
  with gr.Tab("Inpaint mask maker", render=True):
1339
 
1340
- def create_mask_now(img, invert):
1341
- import numpy as np
1342
- import time
1343
-
1344
- time.sleep(0.5)
1345
-
1346
- transparent_image = img["layers"][0]
1347
-
1348
- # Extract the alpha channel
1349
- alpha_channel = np.array(transparent_image)[:, :, 3]
1350
-
1351
- # Create a binary mask by thresholding the alpha channel
1352
- binary_mask = alpha_channel > 1
1353
-
1354
- if invert:
1355
- print("Invert")
1356
- # Invert the binary mask so that the drawn shape is white and the rest is black
1357
- binary_mask = np.invert(binary_mask)
1358
-
1359
- # Convert the binary mask to a 3-channel RGB mask
1360
- rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
1361
-
1362
- # Convert the mask to uint8
1363
- rgb_mask = rgb_mask.astype(np.uint8) * 255
1364
-
1365
- return img["background"], rgb_mask
1366
-
1367
  with gr.Row():
1368
  with gr.Column(scale=2):
1369
- # image_base = gr.ImageEditor(label="Base image", show_label=True, brush=gr.Brush(colors=["#000000"]))
1370
  image_base = gr.ImageEditor(
1371
  sources=["upload", "clipboard"],
1372
  # crop_size="1:1",
@@ -1383,10 +1466,21 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1383
  # "hsl(360, 120, 120)" # in fact any valid colorstring
1384
  ]
1385
  ),
1386
- eraser=gr.Eraser(default_size="16")
1387
  )
1388
  invert_mask = gr.Checkbox(value=False, label="Invert mask")
1389
  btn = gr.Button("Create mask")
 
1390
  with gr.Column(scale=1):
1391
  img_source = gr.Image(interactive=False)
1392
  img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
@@ -1780,6 +1874,13 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1780
  gr.LoginButton()
1781
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
1782
 
1783
- app.queue()
1784
- app.launch(show_error=True, debug=True) # allowed_paths=["./images/"], show_error=True, debug=True
1785
  ## END MOD
 
1
  import spaces
2
  import os
3
+ from argparse import ArgumentParser
4
  from stablepy import (
5
  Model_Diffusers,
6
  SCHEDULE_TYPE_OPTIONS,
 
24
  DIFFUSERS_CONTROLNET_MODEL,
25
  IP_MODELS,
26
  MODE_IP_OPTIONS,
27
+ CACHE_HF_ROOT,
28
  )
29
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
30
  import torch
 
43
  progress_step_bar,
44
  html_template_message,
45
  escape_html,
46
+ clear_hf_cache,
47
  )
48
  from image_processor import preprocessor_tab
49
  from datetime import datetime
 
56
  # import urllib.parse
57
  import subprocess
58
 
59
+ IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
60
+ if IS_ZERO_GPU:
61
  subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
62
+ IS_GPU_MODE = True if IS_ZERO_GPU else (True if torch.cuda.is_available() else False)
63
+ img_path = "./images/"
64
+ allowed_path = os.path.abspath(img_path)
65
+ delete_cache_time = (9600, 9600) if IS_ZERO_GPU else (86400, 86400)
66
 
67
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
68
+ torch.backends.cuda.matmul.allow_tf32 = True
69
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
 
70
 
71
  ## BEGIN MOD
72
  from modutils import (list_uniq, download_private_repo, get_model_id_list, get_tupled_embed_list,
 
100
 
101
  # Download stuffs
102
  for url in [url.strip() for url in DOWNLOAD_MODEL.split(',')]:
103
+ download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
 
104
  for url in [url.strip() for url in DOWNLOAD_VAE.split(',')]:
105
+ download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
 
106
  for url in [url.strip() for url in DOWNLOAD_LORA.split(',')]:
107
+ download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
 
108
 
109
  # Download Embeddings
110
  for url_embed in DOWNLOAD_EMBEDS:
 
131
 
132
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
133
 
134
+ components = None
135
+ if IS_ZERO_GPU:
136
+ flux_repo = "camenduru/FLUX.1-dev-diffusers"
137
+ flux_pipe = FluxPipeline.from_pretrained(
138
+ flux_repo,
139
+ transformer=None,
140
+ torch_dtype=torch.bfloat16,
141
+ )#.to("cuda")
142
+ components = flux_pipe.components
143
+ delete_model(flux_repo)
144
 
145
  #######################
146
  # GUI
 
150
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
151
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
152
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
 
 
 
153
 
154
+ parser = ArgumentParser(description='DiffuseCraft: Create images from text prompts.', add_help=True)
155
+ parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
156
+ parser.add_argument('--theme', type=str, default="NoCrypt/miku", help='Set the theme (default: NoCrypt/miku)')
157
+ parser.add_argument("--ssr", action="store_true", default=False, help="Enable SSR (Server-Side Rendering)") #
158
+ parser.add_argument("--log-level", type=str, default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="Set logging level (default: INFO)")
159
+ args = parser.parse_args()
160
+
161
+ logger.setLevel(
162
+ "INFO" if IS_ZERO_GPU else getattr(logging, args.log_level.upper())
163
+ )
164
+
165
+ ## BEGIN MOD
166
  from tagger.v2 import V2_ALL_MODELS, v2_random_prompt, v2_upsampling_prompt
167
  from tagger.utils import (gradio_copy_text, COPY_ACTION_JS, gradio_copy_prompt,
168
  V2_ASPECT_RATIO_OPTIONS, V2_RATING_OPTIONS, V2_LENGTH_OPTIONS, V2_IDENTITY_OPTIONS)
 
180
  )
181
  ## END MOD
182
 
183
+ def lora_chk(lora_):
184
+ if isinstance(lora_, str) and lora_.strip() not in ["", "None"]:
185
+ return lora_
186
+ return None
187
+
188
  class GuiSD:
189
  def __init__(self, stream=True):
190
  self.model = None
 
200
  removal_candidate = self.inventory.pop(0)
201
  delete_model(removal_candidate)
202
 
203
+ # Cleanup after 60 seconds of inactivity
204
+ lowPrioCleanup = max((datetime.now() - self.last_load).total_seconds(), 0) > 60
205
+ if lowPrioCleanup and not self.status_loading and get_used_storage_gb(CACHE_HF_ROOT) > (storage_floor_gb * 2):
206
+ print("Cleaning up Hugging Face cache...")
207
+ clear_hf_cache()
208
+ self.inventory = [
209
+ m for m in self.inventory if os.path.exists(m)
210
+ ]
211
+
212
  def update_inventory(self, model_name):
213
  if model_name not in single_file_model_list:
214
  self.inventory = [
 
219
  def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
220
 
221
  # download link model > model_name
222
+ if model_name.startswith("http"): #
223
+ yield f"Downloading model: {model_name}"
224
+ #model_name = download_things(DIRECTORY_MODELS, model_name, HF_TOKEN, CIVITAI_API_KEY)
225
  model_name, model_type = download_link_model(model_name, DIRECTORY_MODELS) #
226
+ if not model_name:
227
+ raise ValueError("Error retrieving model information from URL")
228
  is_link_model = True #
229
  else: is_link_model = False #
230
 
231
+ if IS_ZERO_GPU:
232
+ self.update_storage_models()
233
 
234
  vae_model = vae_model if vae_model != "None" else None
235
  model_type = get_model_type(model_name) if not is_link_model else model_type #
236
  dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
237
 
238
  if not os.path.exists(model_name):
239
+ logger.debug(f"model_name={model_name}, vae_model={vae_model}, task={task}, controlnet_model={controlnet_model}")
240
  _ = download_diffuser_repo(
241
  repo_name=model_name,
242
  model_type=model_type,
 
281
  type_model_precision=dtype_model,
282
  retain_task_model_in_cache=False,
283
  controlnet_model=controlnet_model,
284
+ device="cpu" if IS_ZERO_GPU else None,
285
  env_components=components,
286
  )
287
+ self.model.advanced_params(image_preprocessor_cuda_active=IS_GPU_MODE)
288
  else:
289
  if self.model.base_model_id != model_name:
290
  load_now_time = datetime.now()
 
294
  print("Waiting for the previous model's time ops...")
295
  time.sleep(9 - elapsed_time)
296
 
297
+ if IS_ZERO_GPU:
298
+ self.model.device = torch.device("cpu")
299
  self.model.load_pipe(
300
  model_name,
301
  task_name=TASK_STABLEPY[task],
 
458
  lora_scale3, lora4, lora_scale4, lora5, lora_scale5, lora6, lora_scale6, lora7, lora_scale7)
459
  ## END MOD
460
 
461
+ logger.debug(f"Config model: {model_name}, {vae_model}, {loras_list}")
462
 
463
  task = TASK_STABLEPY[task]
464
 
 
556
  "distance_threshold": distance_threshold,
557
  "recolor_gamma_correction": float(recolor_gamma_correction),
558
  "tile_blur_sigma": int(tile_blur_sigma),
559
+ "lora_A": lora_chk(lora1),
560
  "lora_scale_A": lora_scale1,
561
+ "lora_B": lora_chk(lora2),
562
  "lora_scale_B": lora_scale2,
563
+ "lora_C": lora_chk(lora3),
564
  "lora_scale_C": lora_scale3,
565
+ "lora_D": lora_chk(lora4),
566
  "lora_scale_D": lora_scale4,
567
+ "lora_E": lora_chk(lora5),
568
  "lora_scale_E": lora_scale5,
569
+ "lora_F": lora_chk(lora6),
570
  "lora_scale_F": lora_scale6,
571
+ "lora_G": lora_chk(lora7),
572
  "lora_scale_G": lora_scale7,
573
  ## BEGIN MOD
574
  "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
 
628
  # kwargs for diffusers pipeline
629
  if guidance_rescale:
630
  pipe_params["guidance_rescale"] = guidance_rescale
631
+ if IS_ZERO_GPU:
632
+ self.model.device = torch.device("cuda:0")
633
+ if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
634
+ self.model.pipe.transformer.to(self.model.device)
635
+ logger.debug("transformer to cuda")
636
 
637
  actual_progress = 0
638
  info_images = gr.update()
 
662
 
663
  download_links = "<br>".join(
664
  [
665
+ f'<a href="{path.replace("/images/", f"/gradio_api/file={allowed_path}/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
666
  for i, path in enumerate(image_path)
667
  ]
668
  )
 
774
 
775
  @spaces.GPU(duration=15)
776
  def process_upscale(image, upscaler_name, upscaler_size):
777
+ if image is None:
778
+ return None
779
 
780
  from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
781
  from stablepy import load_upscaler_model
 
792
 
793
  name_upscaler = f"./{DIRECTORY_UPSCALERS}/{name_upscaler.split('/')[-1]}"
794
 
795
+ scaler_beta = load_upscaler_model(model=name_upscaler, tile=(0 if IS_ZERO_GPU else 192), tile_overlap=8, device=("cuda" if IS_GPU_MODE else "cpu"), half=IS_GPU_MODE)
796
  image_up = scaler_beta.upscale(image, upscaler_size, True)
797
 
798
  image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
 
801
 
802
 
803
  # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
804
+ # dynamic_gpu_duration.zerogpu = True
805
+ # sd_gen_generate_pipeline.zerogpu = True
806
  sd_gen = GuiSD()
807
 
808
 
 
820
  .desc [src$='#float'] { float: right; margin: 20px; }
821
  """
822
 
823
+ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=False, css=CSS) as app:
824
  gr.Markdown("# 🧩 DiffuseCraft Mod", elem_classes="title")
825
  gr.Markdown("This space is a modification of [r3gm's DiffuseCraft](https://huggingface.co/spaces/r3gm/DiffuseCraft).", elem_classes="info")
826
  with gr.Column():
 
946
  with gr.Row():
947
  seed_gui = gr.Number(minimum=-1, maximum=2**32-1, value=-1, label="Seed")
948
  pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
949
+ num_images_gui = gr.Slider(minimum=1, maximum=(5 if IS_ZERO_GPU else 20), step=1, value=1, label="Images")
950
  clip_skip_gui = gr.Checkbox(value=False, label="Layer 2 Clip Skip")
951
  free_u_gui = gr.Checkbox(value=False, label="FreeU")
952
  with gr.Row():
 
973
  "Schedule type": gr.update(value="Automatic"),
974
  "PAG": gr.update(value=.0),
975
  "FreeU": gr.update(value=False),
976
+ "Hires upscaler": gr.update(),
977
+ "Hires upscale": gr.update(),
978
+ "Hires steps": gr.update(),
979
+ "Hires denoising strength": gr.update(),
980
+ "Hires CFG": gr.update(),
981
+ "Hires sampler": gr.update(),
982
+ "Hires schedule type": gr.update(),
983
+ "Image resolution": gr.update(value=1024),
984
+ "Strength": gr.update(),
985
  }
986
+
987
+ # Generate up to 7 LoRAs
988
+ for i in range(1, 8):
989
+ valid_receptors[f"Lora_{i}"] = gr.update()
990
+ valid_receptors[f"Lora_scale_{i}"] = gr.update()
991
+
992
  valid_keys = list(valid_receptors.keys())
993
 
994
  parameters = extract_parameters(base_prompt)
 
1002
  parameters["Sampler"] = value_sampler
1003
  parameters["Schedule type"] = s_type
1004
 
1005
+ params_lora = []
1006
+ if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
1007
+ params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
1008
+ if "Loras" in parameters:
1009
+ params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
1010
+
1011
+ if params_lora:
1012
+ parsed_params = []
1013
+ for tag_l in params_lora:
1014
+ try:
1015
+ inner = tag_l.strip("<>") # remove < >
1016
+ _, data_l = inner.split(":", 1) # remove the "lora:" part
1017
+ parts_l = data_l.split(":")
1018
+
1019
+ name_l = parts_l[0]
1020
+ weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0 # default weight = 1.0
1021
+
1022
+ parsed_params.append((name_l, weight_l))
1023
+ except Exception as e:
1024
+ print(f"Error parsing LoRA tag {tag_l}: {e}")
1025
+
1026
+ num_lora = 1
1027
+ for parsed_l, parsed_s in parsed_params:
1028
+ filtered_loras = [m for m in lora_model_list if parsed_l in m]
1029
+ if filtered_loras:
1030
+ parameters[f"Lora_{num_lora}"] = filtered_loras[0]
1031
+ parameters[f"Lora_scale_{num_lora}"] = parsed_s
1032
+ num_lora += 1
1033
+
1034
+ # continue = discard new value
1035
  for key, val in parameters.items():
1036
  # print(val)
1037
  if key in valid_keys:
 
1039
  if key == "Sampler":
1040
  if val not in scheduler_names:
1041
  continue
1042
+ if key in ["Schedule type", "Hires schedule type"]:
1043
  if val not in SCHEDULE_TYPE_OPTIONS:
1044
+ continue
1045
+ if key == "Hires sampler":
1046
+ if val not in POST_PROCESSING_SAMPLER:
1047
+ continue
1048
  elif key == "Clip skip":
1049
  if "," in str(val):
1050
  val = val.replace(",", "")
 
1052
  val = True
1053
  if key == "prompt":
1054
  if ">" in val and "<" in val:
1055
+ val = re.sub(r'<[^>]+>', '', val) # Delete html and loras
1056
  print("Removed LoRA written in the prompt")
1057
  if key in ["prompt", "neg_prompt"]:
1058
  val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
1059
+ if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
1060
  val = int(val)
1061
  if key == "FreeU":
1062
  val = True
1063
+ if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
1064
  val = float(val)
1065
  if key == "Model":
1066
  filtered_models = [m for m in model_list if val in m]
 
1068
  val = filtered_models[0]
1069
  else:
1070
  val = name_model
1071
+ if key == "Hires upscaler":
1072
+ if val not in UPSCALER_KEYS:
1073
+ continue
1074
  if key == "Seed":
1075
  continue
1076
+
1077
  valid_receptors[key] = gr.update(value=val)
1078
  # print(val, type(val))
1079
  # print(valid_receptors)
 
1081
  print(str(e))
1082
  return [value for value in valid_receptors.values()]
1083
 
1084
  def run_clear_prompt_gui():
1085
  return gr.update(value=""), gr.update(value="")
1086
  clear_prompt_gui.click(
 
1098
  return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320, visible=visible)
1099
 
1100
  def lora_scale_slider(label, visible=True):
1101
+ val_lora = 8 if IS_ZERO_GPU else 8 #
1102
+ return gr.Slider(minimum=-val_lora, maximum=val_lora, step=0.01, value=0.33, label=label, visible=visible)
1103
+
1104
  def lora_textbox(label):
1105
  return gr.Textbox(label=label, info="Example of prompt:", value="None", show_copy_button=True, interactive=False, visible=False)
1106
 
 
1146
  lora5_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1147
  lora5_desc_gui = gr.Markdown(value="", visible=False)
1148
  with gr.Column():
1149
+ lora6_gui = lora_dropdown("LoRA6", visible=(not IS_ZERO_GPU))
1150
+ lora_scale_6_gui = lora_scale_slider("LoRA Scale 6", visible=(not IS_ZERO_GPU))
1151
  with gr.Row():
1152
  with gr.Group():
1153
  lora6_info_gui = lora_textbox("LoRA6 prompts")
1154
  lora6_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1155
  lora6_desc_gui = gr.Markdown(value="", visible=False)
1156
  with gr.Column():
1157
+ lora7_gui = lora_dropdown("LoRA7", visible=(not IS_ZERO_GPU))
1158
+ lora_scale_7_gui = lora_scale_slider("LoRA Scale 7", visible=(not IS_ZERO_GPU))
1159
  with gr.Row():
1160
  with gr.Group():
1161
  lora7_info_gui = lora_textbox("LoRA7 prompts")
 
1177
  search_civitai_result_lora = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
1178
  with gr.Row():
1179
  text_lora = gr.Textbox(label="LoRA's download URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1, scale=4)
1180
+ romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=(not IS_ZERO_GPU))
1181
  button_lora = gr.Button("Get and Refresh the LoRA Lists")
1182
  new_lora_status = gr.HTML()
1183
  with gr.Accordion("From Local", open=True, visible=True):
 
1188
  with gr.Accordion("Hires fix", open=False, visible=True) as menu_hires:
1189
  upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
1190
  with gr.Row():
1191
+ upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.2, label="Upscale by")
1192
+ upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=(0 if IS_ZERO_GPU else 192), label="Upscaler Tile Size", info="0 = no tiling")
1193
  upscaler_tile_overlap_gui = gr.Slider(minimum=0, maximum=48, step=1, value=8, label="Upscaler Tile Overlap")
1194
  with gr.Row():
1195
  hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
 
1299
  )
1300
 
1301
  with gr.Row():
1302
+ value_threshold_gui = gr.Slider(minimum=0.0, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
1303
+ distance_threshold_gui = gr.Slider(minimum=0.0, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
1304
  recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
1305
  tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
1306
 
 
1365
  hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
1366
  hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
1367
  generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
1368
+ with gr.Column(visible=(not IS_ZERO_GPU)):
1369
+ image_storage_location_gui = gr.Textbox(value=img_path, label="Image Storage Location")
1370
+ disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
1371
+ leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
1372
+
1373
  with gr.Accordion("More settings", open=False, visible=False):
1374
  loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
1375
  retain_task_cache_gui = gr.Checkbox(value=True, label="Retain task model in cache")
 
 
1376
  display_images_gui = gr.Checkbox(value=False, label="Display Images")
1377
  image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
 
1378
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1379
  retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1380
  retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
1381
  xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
1382
 
1383
+ set_params_gui.click(
1384
+ run_set_params_gui, [prompt_gui, model_name_gui], [
1385
+ prompt_gui,
1386
+ neg_prompt_gui,
1387
+ steps_gui,
1388
+ img_width_gui,
1389
+ img_height_gui,
1390
+ seed_gui,
1391
+ sampler_gui,
1392
+ cfg_gui,
1393
+ clip_skip_gui,
1394
+ model_name_gui,
1395
+ schedule_type_gui,
1396
+ pag_scale_gui,
1397
+ free_u_gui,
1398
+ upscaler_model_path_gui,
1399
+ upscaler_increases_size_gui,
1400
+ hires_steps_gui,
1401
+ hires_denoising_strength_gui,
1402
+ hires_guidance_scale_gui,
1403
+ hires_sampler_gui,
1404
+ hires_schedule_type_gui,
1405
+ image_resolution_gui,
1406
+ strength_gui,
1407
+ lora1_gui,
1408
+ lora_scale_1_gui,
1409
+ lora2_gui,
1410
+ lora_scale_2_gui,
1411
+ lora3_gui,
1412
+ lora_scale_3_gui,
1413
+ lora4_gui,
1414
+ lora_scale_4_gui,
1415
+ lora5_gui,
1416
+ lora_scale_5_gui,
1417
+ lora6_gui,
1418
+ lora_scale_6_gui,
1419
+ lora7_gui,
1420
+ lora_scale_7_gui,
1421
+ ],
1422
+ )
1423
+
1424
  with gr.Accordion("Examples and help", open=True, visible=True) as menu_example:
1425
  gr.Examples(
1426
  examples=EXAMPLES_GUI,
 
1448
 
1449
  with gr.Tab("Inpaint mask maker", render=True):
1450
 
1451
  with gr.Row():
1452
  with gr.Column(scale=2):
 
1453
  image_base = gr.ImageEditor(
1454
  sources=["upload", "clipboard"],
1455
  # crop_size="1:1",
 
1466
  # "hsl(360, 120, 120)" # in fact any valid colorstring
1467
  ]
1468
  ),
1469
+ eraser=gr.Eraser(default_size="16"),
1470
+ render=True,
1471
+ visible=False,
1472
+ interactive=False,
1473
  )
1474
+
1475
+ show_canvas = gr.Button("SHOW INPAINT CANVAS")
1476
+
1477
+ def change_visibility_canvas():
1478
+ return gr.update(visible=True, interactive=True), gr.update(visible=False)
1479
+ show_canvas.click(change_visibility_canvas, [], [image_base, show_canvas])
1480
+
1481
  invert_mask = gr.Checkbox(value=False, label="Invert mask")
1482
  btn = gr.Button("Create mask")
1483
+
1484
  with gr.Column(scale=1):
1485
  img_source = gr.Image(interactive=False)
1486
  img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
 
1874
  gr.LoginButton()
1875
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
1876
 
1877
+ if __name__ == "__main__":
1878
+ app.queue()
1879
+ app.launch(
1880
+ show_error=True,
1881
+ share=args.share_enabled,
1882
+ debug=True,
1883
+ ssr_mode=args.ssr,
1884
+ allowed_paths=[allowed_path],
1885
+ )
1886
  ## END MOD
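Note: the new run_set_params_gui above now also restores up to seven LoRAs by scanning the pasted prompt (and an optional "Loras" field) for <lora:name:weight> tags. A minimal standalone sketch of that parsing step, with an illustrative prompt (the function name and example values are hypothetical):

    import re

    def parse_lora_tags(prompt: str) -> list[tuple[str, float]]:
        """Extract (name, weight) pairs from <lora:name:weight> tags; weight defaults to 1.0."""
        pairs = []
        for tag in re.findall(r'<lora:[^>]+>', prompt):
            inner = tag.strip("<>")           # drop the angle brackets
            _, data = inner.split(":", 1)     # drop the leading "lora:" part
            parts = data.split(":")
            name = parts[0]
            weight = float(parts[1]) if len(parts) > 1 else 1.0
            pairs.append((name, weight))
        return pairs

    print(parse_lora_tags("1girl, <lora:detail-tweaker:0.8>, <lora:flat-color>"))
    # -> [('detail-tweaker', 0.8), ('flat-color', 1.0)]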
constants.py CHANGED
@@ -9,6 +9,8 @@ from stablepy import (
9
  IP_ADAPTERS_SDXL,
10
  )
11
 
 
 
12
  # - **Download Models**
13
  DOWNLOAD_MODEL = "https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
14
 
@@ -23,12 +25,12 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
23
  'Laxhar/noobai-XL-1.1',
24
  'Laxhar/noobai-XL-Vpred-1.0',
25
  'black-forest-labs/FLUX.1-dev',
 
26
  'John6666/blue-pencil-flux1-v021-fp8-flux',
27
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
28
  'John6666/xe-anime-flux-v04-fp8-flux',
29
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
30
  'John6666/carnival-unchained-v10-fp8-flux',
31
- 'John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux',
32
  'Freepik/flux.1-lite-8B-alpha',
33
  'shauray/FluxDev-HyperSD-merged',
34
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
@@ -37,23 +39,19 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
37
  # 'ostris/OpenFLUX.1',
38
  'shuttleai/shuttle-3-diffusion',
39
  'Laxhar/noobai-XL-1.0',
40
- 'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
41
  'Laxhar/noobai-XL-0.77',
42
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
43
  'Laxhar/noobai-XL-0.6',
44
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
45
  'John6666/noobai-cyberfix-v10-sdxl',
46
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
47
- 'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
48
- 'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
49
- 'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
50
- 'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
51
  'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
52
  'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
53
  'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
54
  'John6666/mistoon-anime-v10illustrious-sdxl',
55
- 'John6666/hassaku-xl-illustrious-v10-sdxl',
56
- 'John6666/hassaku-xl-illustrious-v10style-sdxl',
57
  'John6666/haruki-mix-illustrious-v10-sdxl',
58
  'John6666/noobreal-v10-sdxl',
59
  'John6666/complicated-noobai-merge-vprediction-sdxl',
@@ -64,6 +62,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
64
  'Laxhar/noobai-XL-Vpred-0.65',
65
  'Laxhar/noobai-XL-Vpred-0.6',
66
  'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
 
67
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
68
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
69
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
@@ -74,19 +73,34 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
74
  'John6666/obsession-illustriousxl-v21-sdxl',
75
  'John6666/obsession-illustriousxl-v30-sdxl',
76
  'John6666/obsession-illustriousxl-v31-sdxl',
77
  'John6666/wai-nsfw-illustrious-v70-sdxl',
 
78
  'John6666/illustrious-pony-mix-v3-sdxl',
79
- 'John6666/nova-anime-xl-illustriousv10-sdxl',
80
- 'John6666/nova-orange-xl-v30-sdxl',
81
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
82
  'eienmojiki/Anything-XL',
83
  'eienmojiki/Starry-XL-v5.2',
 
84
  'John6666/meinaxl-v2-sdxl',
85
  'Eugeoter/artiwaifu-diffusion-2.0',
86
  'comin/IterComp',
87
- 'John6666/epicrealism-xl-vxiabeast-sdxl',
88
- 'John6666/epicrealism-xl-v10kiss2-sdxl',
89
  'John6666/epicrealism-xl-v8kiss-sdxl',
90
  'misri/zavychromaxl_v80',
91
  'SG161222/RealVisXL_V4.0',
92
  'SG161222/RealVisXL_V5.0',
@@ -102,8 +116,10 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
102
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
103
  'John6666/t-ponynai3-v51-sdxl',
104
  'John6666/t-ponynai3-v65-sdxl',
 
105
  'John6666/prefect-pony-xl-v3-sdxl',
106
  'John6666/prefect-pony-xl-v4-sdxl',
 
107
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
108
  'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
109
  'John6666/wai-real-mix-v11-sdxl',
@@ -111,13 +127,14 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
111
  'John6666/wai-c-v6-sdxl',
112
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
113
  'John6666/sifw-annihilation-xl-v2-sdxl',
 
114
  'John6666/photo-realistic-pony-v5-sdxl',
115
  'John6666/pony-realism-v21main-sdxl',
116
  'John6666/pony-realism-v22main-sdxl',
117
- 'John6666/cyberrealistic-pony-v63-sdxl',
118
- 'John6666/cyberrealistic-pony-v64-sdxl',
119
  'John6666/cyberrealistic-pony-v65-sdxl',
120
  'John6666/cyberrealistic-pony-v7-sdxl',
 
121
  'GraydientPlatformAPI/realcartoon-pony-diffusion',
122
  'John6666/nova-anime-xl-pony-v5-sdxl',
123
  'John6666/autismmix-sdxl-autismmix-pony-sdxl',
@@ -127,13 +144,15 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
127
  'John6666/duchaiten-pony-real-v11fix-sdxl',
128
  'John6666/duchaiten-pony-real-v20-sdxl',
129
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
130
- 'Spestly/OdysseyXL-3.0',
131
- 'Spestly/OdysseyXL-4.0',
132
  'KBlueLeaf/Kohaku-XL-Zeta',
133
  'cagliostrolab/animagine-xl-3.1',
 
134
  'yodayo-ai/kivotos-xl-2.0',
135
  'yodayo-ai/holodayo-xl-2.1',
136
  'yodayo-ai/clandestine-xl-1.0',
137
  'digiplay/majicMIX_sombre_v2',
138
  'digiplay/majicMIX_realistic_v6',
139
  'digiplay/majicMIX_realistic_v7',
@@ -159,9 +178,9 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
159
  'GraydientPlatformAPI/realcartoon3d-17',
160
  'GraydientPlatformAPI/realcartoon-pixar11',
161
  'GraydientPlatformAPI/realcartoon-real17',
162
- 'nitrosocke/Ghibli-Diffusion',
163
  ]
164
 
 
165
  DIFFUSERS_FORMAT_LORAS = [
166
  "nerijs/animation2k-flux",
167
  "XLabs-AI/flux-RealismLora",
@@ -183,8 +202,11 @@ DIRECTORY_VAES = 'vaes'
183
  DIRECTORY_EMBEDS = 'embedings'
184
  DIRECTORY_UPSCALERS = 'upscalers'
185
 
186
- CACHE_HF = "/home/user/.cache/huggingface/hub/"
187
  STORAGE_ROOT = "/home/user/"
188
 
189
  TASK_STABLEPY = {
190
  'txt2img': 'txt2img',
@@ -226,6 +248,7 @@ UPSCALER_DICT_GUI = {
226
  # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
227
  # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
228
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
 
229
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
230
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
231
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
@@ -359,9 +382,11 @@ SUBTITLE_GUI = (
359
  " to perform different tasks in image generation."
360
  )
361
 
 
 
362
  HELP_GUI = (
363
- """### Help:
364
- - The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'.
365
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
366
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
367
  """
@@ -485,7 +510,7 @@ EXAMPLES_GUI = [
485
  20,
486
  4.0,
487
  -1,
488
- "loras/Coloring_book_-_LineArt.safetensors",
489
  1.0,
490
  "DPM++ 2M SDE",
491
  1024,
@@ -580,6 +605,7 @@ EXAMPLES_GUI = [
580
  RESOURCES = (
581
  """### Resources
582
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
583
- - You can also try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
 
584
  """
585
- )
 
9
  IP_ADAPTERS_SDXL,
10
  )
11
 
12
+ IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
13
+
14
  # - **Download Models**
15
  DOWNLOAD_MODEL = "https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
16
 
 
25
  'Laxhar/noobai-XL-1.1',
26
  'Laxhar/noobai-XL-Vpred-1.0',
27
  'black-forest-labs/FLUX.1-dev',
28
+ 'black-forest-labs/FLUX.1-Krea-dev',
29
  'John6666/blue-pencil-flux1-v021-fp8-flux',
30
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
31
  'John6666/xe-anime-flux-v04-fp8-flux',
32
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
33
  'John6666/carnival-unchained-v10-fp8-flux',
 
34
  'Freepik/flux.1-lite-8B-alpha',
35
  'shauray/FluxDev-HyperSD-merged',
36
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
 
39
  # 'ostris/OpenFLUX.1',
40
  'shuttleai/shuttle-3-diffusion',
41
  'Laxhar/noobai-XL-1.0',
 
42
  'Laxhar/noobai-XL-0.77',
43
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
44
  'Laxhar/noobai-XL-0.6',
45
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
46
  'John6666/noobai-cyberfix-v10-sdxl',
47
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
48
+ 'John6666/ripplemix-noob-vpred10-illustrious01-v14-sdxl',
49
+ 'John6666/sigmaih-15-sdxl',
 
 
50
  'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
51
  'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
52
  'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
53
  'John6666/mistoon-anime-v10illustrious-sdxl',
54
+ 'John6666/hassaku-xl-illustrious-v22-sdxl',
 
55
  'John6666/haruki-mix-illustrious-v10-sdxl',
56
  'John6666/noobreal-v10-sdxl',
57
  'John6666/complicated-noobai-merge-vprediction-sdxl',
 
62
  'Laxhar/noobai-XL-Vpred-0.65',
63
  'Laxhar/noobai-XL-Vpred-0.6',
64
  'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
65
+ 'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
66
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
67
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
68
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
 
73
  'John6666/obsession-illustriousxl-v21-sdxl',
74
  'John6666/obsession-illustriousxl-v30-sdxl',
75
  'John6666/obsession-illustriousxl-v31-sdxl',
76
+ 'John6666/one-obsession-13-sdxl',
77
+ 'John6666/one-obsession-14-24d-sdxl',
78
+ 'John6666/one-obsession-15-noobai-sdxl',
79
+ 'John6666/one-obsession-v16-noobai-sdxl',
80
+ 'John6666/prefect-illustrious-xl-v3-sdxl',
81
  'John6666/wai-nsfw-illustrious-v70-sdxl',
82
+ 'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
83
  'John6666/illustrious-pony-mix-v3-sdxl',
84
+ 'John6666/nova-anime-xl-il-v90-sdxl',
85
+ 'John6666/nova-anime-xl-il-v110-sdxl',
86
+ 'John6666/nova-orange-xl-re-v10-sdxl',
87
+ 'John6666/nova-orange-xl-v110-sdxl',
88
+ 'John6666/nova-orange-xl-re-v20-sdxl',
89
+ 'John6666/nova-unreal-xl-v60-sdxl',
90
+ 'John6666/nova-unreal-xl-v70-sdxl',
91
+ 'John6666/nova-unreal-xl-v80-sdxl',
92
+ 'John6666/nova-cartoon-xl-v40-sdxl',
93
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
94
  'eienmojiki/Anything-XL',
95
  'eienmojiki/Starry-XL-v5.2',
96
+ 'votepurchase/plantMilkModelSuite_walnut',
97
  'John6666/meinaxl-v2-sdxl',
98
  'Eugeoter/artiwaifu-diffusion-2.0',
99
  'comin/IterComp',
 
 
100
  'John6666/epicrealism-xl-v8kiss-sdxl',
101
+ 'John6666/epicrealism-xl-v10kiss2-sdxl',
102
+ 'John6666/epicrealism-xl-vxiabeast-sdxl',
103
+ 'John6666/epicrealism-xl-vxvii-crystal-clear-realism-sdxl',
104
  'misri/zavychromaxl_v80',
105
  'SG161222/RealVisXL_V4.0',
106
  'SG161222/RealVisXL_V5.0',
 
116
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
117
  'John6666/t-ponynai3-v51-sdxl',
118
  'John6666/t-ponynai3-v65-sdxl',
119
+ 'John6666/t-ponynai3-v7-sdxl',
120
  'John6666/prefect-pony-xl-v3-sdxl',
121
  'John6666/prefect-pony-xl-v4-sdxl',
122
+ 'John6666/prefect-pony-xl-v50-sdxl',
123
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
124
  'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
125
  'John6666/wai-real-mix-v11-sdxl',
 
127
  'John6666/wai-c-v6-sdxl',
128
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
129
  'John6666/sifw-annihilation-xl-v2-sdxl',
130
+ 'John6666/sifw-annihilation-xl-v305illustrious-beta-sdxl',
131
  'John6666/photo-realistic-pony-v5-sdxl',
132
  'John6666/pony-realism-v21main-sdxl',
133
  'John6666/pony-realism-v22main-sdxl',
134
+ 'John6666/pony-realism-v23-ultra-sdxl',
 
135
  'John6666/cyberrealistic-pony-v65-sdxl',
136
  'John6666/cyberrealistic-pony-v7-sdxl',
137
+ 'John6666/cyberrealistic-pony-v127-alternative-sdxl',
138
  'GraydientPlatformAPI/realcartoon-pony-diffusion',
139
  'John6666/nova-anime-xl-pony-v5-sdxl',
140
  'John6666/autismmix-sdxl-autismmix-pony-sdxl',
 
144
  'John6666/duchaiten-pony-real-v11fix-sdxl',
145
  'John6666/duchaiten-pony-real-v20-sdxl',
146
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
 
 
147
  'KBlueLeaf/Kohaku-XL-Zeta',
148
  'cagliostrolab/animagine-xl-3.1',
149
+ 'cagliostrolab/animagine-xl-4.0',
150
  'yodayo-ai/kivotos-xl-2.0',
151
  'yodayo-ai/holodayo-xl-2.1',
152
  'yodayo-ai/clandestine-xl-1.0',
153
+ 'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
154
+ 'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
155
+ 'https://civitai.com/models/30240?modelVersionId=125771',
156
  'digiplay/majicMIX_sombre_v2',
157
  'digiplay/majicMIX_realistic_v6',
158
  'digiplay/majicMIX_realistic_v7',
 
178
  'GraydientPlatformAPI/realcartoon3d-17',
179
  'GraydientPlatformAPI/realcartoon-pixar11',
180
  'GraydientPlatformAPI/realcartoon-real17',
 
181
  ]
182
 
183
+
184
  DIFFUSERS_FORMAT_LORAS = [
185
  "nerijs/animation2k-flux",
186
  "XLabs-AI/flux-RealismLora",
 
202
  DIRECTORY_EMBEDS = 'embedings'
203
  DIRECTORY_UPSCALERS = 'upscalers'
204
 
 
205
  STORAGE_ROOT = "/home/user/"
206
+ CACHE_HF_ROOT = os.path.expanduser(os.getenv("HF_HOME")) if os.getenv("HF_HOME") else os.path.expanduser("~/cache/huggingface")
207
+ CACHE_HF = os.path.join(CACHE_HF_ROOT, "hub")
208
+ if IS_ZERO_GPU:
209
+ os.environ["HF_HOME"] = CACHE_HF
210
 
211
  TASK_STABLEPY = {
212
  'txt2img': 'txt2img',
 
248
  # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
249
  # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
250
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
251
+ "Real-ESRGAN-Anime-finetuning": "https://huggingface.co/danhtran2mind/Real-ESRGAN-Anime-finetuning/resolve/main/Real-ESRGAN-Anime-finetuning.pth",
252
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
253
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
254
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
 
382
  " to perform different tasks in image generation."
383
  )
384
 
385
+ msg_zero = "" if not IS_ZERO_GPU else "- The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'."
386
+
387
  HELP_GUI = (
388
+ f"""### Help:
389
+ {msg_zero}
390
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
391
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
392
  """
 
510
  20,
511
  4.0,
512
  -1,
513
+ ("loras/Coloring_book_-_LineArt.safetensors" if os.path.exists("loras/Coloring_book_-_LineArt.safetensors") else "None"),
514
  1.0,
515
  "DPM++ 2M SDE",
516
  1024,
 
605
  RESOURCES = (
606
  """### Resources
607
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
608
+ - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
609
+ - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
610
  """
611
+ )
env.py CHANGED
@@ -39,6 +39,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'Raelina/Raehoshi-illust-XL-4',
     'Raelina/Raehoshi-illust-XL-5',
     'Raelina/Raehoshi-illust-XL-5.1',
+    'Raelina/Raehoshi-illust-XL-6',
     'camenduru/FLUX.1-dev-diffusers',
     'black-forest-labs/FLUX.1-schnell',
     'sayakpaul/FLUX.1-merged',
modutils.py CHANGED
@@ -1730,7 +1730,7 @@ EXAMPLES_GUI = [
         "votepurchase/ponyDiffusionV6XL",
     ],
     [
-        "1girl, oomuro sakurako, yuru yuri, official art, school uniform, anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
+        "1girl, oomuro sakurako, yuru yuri, official art, school uniform, anime artwork, anime style, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
         "photo, deformed, black and white, realism, disfigured, low contrast, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
         1,
         40,
@@ -1743,17 +1743,17 @@ EXAMPLES_GUI = [
         "Raelina/Rae-Diffusion-XL-V2",
     ],
     [
-        "1girl, akaza akari, yuru yuri, official art, anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
-        "photo, deformed, black and white, realism, disfigured, low contrast, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
+        "1girl, akaza akari, yuru yuri, official art, anime screencap, anime coloring, masterpiece, best quality, absurdres",
+        "bad quality, worst quality, poorly drawn, sketch, multiple views, bad anatomy, bad hands, missing fingers, extra fingers, extra digits, fewer digits, signature, watermark, username",
         1,
-        35,
-        7.0,
+        28,
+        5.5,
         True,
         -1,
         "Euler",
         1024,
         1024,
-        "Raelina/Raemu-XL-V5",
+        "Raelina/Raehoshi-illust-XL-6",
     ],
     [
         "yoshida yuuko, machikado mazoku, 1girl, solo, demon horns,horns, school uniform, long hair, open mouth, skirt, demon girl, ahoge, shiny, shiny hair, anime artwork",
packages.txt CHANGED
@@ -1 +1 @@
-git-lfs aria2 -y ffmpeg
+git-lfs aria2 ffmpeg
requirements.txt CHANGED
@@ -1,21 +1,23 @@
-stablepy==0.6.1
-diffusers<=0.32.0
-transformers==4.47.1
-torch==2.4.0
-numpy<2
-gdown
-opencv-python
-torchvision
-accelerate
-optimum[onnxruntime]
-dartrs
-huggingface_hub
-hf_transfer
-hf_xet
-translatepy
-timm
-rapidfuzz
-sentencepiece
-unidecode
-ultralytics>=8.3.47
+stablepy==0.6.2
+diffusers
+transformers<=4.48.3
+accelerate
+huggingface_hub
+hf_transfer
+hf_xet
+torch==2.5.1
+torchvision
+numpy<2
+gdown
+opencv-python
+optimum[onnxruntime]
+#dartrs
+git+https://github.com/John6666cat/dartrs
+translatepy
+timm
+rapidfuzz
+sentencepiece
+unidecode
+matplotlib-inline
+https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.4.11/flash_attn-2.8.3+cu124torch2.5-cp310-cp310-linux_x86_64.whl
 pydantic==2.10.6
tagger/tagger.py CHANGED
@@ -504,7 +504,7 @@ def gen_prompt(rating: list[str], character: list[str], general: list[str]):
     return ", ".join(all_tags)
 
 
-@spaces.GPU(duration=30)
+@spaces.GPU(duration=10)
 def predict_tags(image: Image.Image, general_threshold: float = 0.3, character_threshold: float = 0.8):
     inputs = wd_processor.preprocess(image, return_tensors="pt")
 
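Note: the tagger now requests a 10-second ZeroGPU slot instead of 30, since the WD-tagger forward pass is quick. For reference, a hypothetical standalone example of the same decorator pattern (the function name and body are illustrative, not the Space's code):

    import spaces
    import torch

    @spaces.GPU(duration=10)  # ask ZeroGPU for roughly 10 s instead of the default
    def gpu_mean(x: torch.Tensor) -> torch.Tensor:
        return x.to("cuda").mean(dim=-1).cpu()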
tagger/v2.py CHANGED
@@ -1,4 +1,4 @@
-import time
+import time, os
 import torch
 from typing import Callable
 from pathlib import Path
@@ -16,16 +16,15 @@ from dartrs.v2 import (
 from dartrs.dartrs import DartTokenizer
 from dartrs.utils import get_generation_config
 
-
 import gradio as gr
 from gradio.components import Component
 
-
 try:
     from output import UpsamplingOutput
 except:
     from .output import UpsamplingOutput
 
+HF_TOKEN = os.getenv("HF_TOKEN", None)
 
 V2_ALL_MODELS = {
     "dart-v2-moe-sft": {
@@ -43,8 +42,8 @@ V2_ALL_MODELS = {
 
 def prepare_models(model_config: dict):
     model_name = model_config["repo"]
-    tokenizer = DartTokenizer.from_pretrained(model_name)
-    model = model_config["class"].from_pretrained(model_name)
+    tokenizer = DartTokenizer.from_pretrained(model_name, auth_token=HF_TOKEN)
+    model = model_config["class"].from_pretrained(model_name, auth_token=HF_TOKEN)
 
     return {
         "tokenizer": tokenizer,