Spaces: Running on Zero
Upload 2 files
- app.py +8 -8
- modutils.py +2 -2
app.py CHANGED
@@ -399,7 +399,7 @@ class GuiSD:
             device="cpu",
         )
         self.model.load_beta_styles()
-
+        self.model.device = torch.device("cpu") #
 
     def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
 
@@ -646,15 +646,15 @@ class GuiSD:
             "high_threshold": high_threshold,
             "value_threshold": value_threshold,
             "distance_threshold": distance_threshold,
-            "lora_A": lora1 if lora1 != "None" else None,
+            "lora_A": lora1 if lora1 != "None" and lora1 != "" else None,
             "lora_scale_A": lora_scale1,
-            "lora_B": lora2 if lora2 != "None" else None,
+            "lora_B": lora2 if lora2 != "None" and lora2 != "" else None,
             "lora_scale_B": lora_scale2,
-            "lora_C": lora3 if lora3 != "None" else None,
+            "lora_C": lora3 if lora3 != "None" and lora3 != "" else None,
             "lora_scale_C": lora_scale3,
-            "lora_D": lora4 if lora4 != "None" else None,
+            "lora_D": lora4 if lora4 != "None" and lora4 != "" else None,
             "lora_scale_D": lora_scale4,
-            "lora_E": lora5 if lora5 != "None" else None,
+            "lora_E": lora5 if lora5 != "None" and lora5 != "" else None,
             "lora_scale_E": lora_scale5,
             ## BEGIN MOD
             "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
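The five lora_* guards repeat one idea: the LoRA dropdowns report the literal string "None" for the placeholder choice, and a cleared or custom-value dropdown can hand back an empty string, so both sentinels must map to Python None before they reach the pipeline. A minimal sketch of that check factored into a helper; normalize_lora is a hypothetical name, not part of the Space:

# Hypothetical helper, not in the Space's code: the same sentinel check
# the diff adds to lora_A..lora_E, factored out once.
def normalize_lora(choice):
    """Map Gradio's no-selection sentinels ("None" and "") to None."""
    return choice if choice not in (None, "", "None") else None

# Each dict entry then collapses to the same pattern:
#   "lora_A": normalize_lora(lora1), ..., "lora_E": normalize_lora(lora5)
assert normalize_lora("None") is None
assert normalize_lora("") is None
assert normalize_lora("detail-tweaker.safetensors") == "detail-tweaker.safetensors"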
@@ -704,7 +704,7 @@ class GuiSD:
         }
 
         self.model.device = torch.device("cuda:0")
-        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5 and loras_list != [""] * 5:
             self.model.pipe.transformer.to(self.model.device)
             print("transformer to cuda")
 
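Paired with the new line 402, which parks self.model.device on CPU after load_beta_styles(), this hunk moves the transformer to cuda:0 only when some LoRA slot is set, the usual keep-weights-on-CPU-until-needed pattern for a ZeroGPU Space. One caveat the list comparison leaves open: a mixed list of sentinels such as ["None", "", "None", "None", "None"] differs from both the all-"None" and the all-"" list, so it would still trigger the move. A hedged sketch of the pattern with a per-element check that also covers that case; Pipe and its transformer attribute are stand-ins, only the torch calls are real API:

# Stand-in classes; only torch itself is assumed here.
import torch
import torch.nn as nn

class Pipe:
    def __init__(self):
        self.transformer = nn.Linear(4, 4)  # placeholder for the pipeline's transformer submodule

pipe = Pipe()
device = torch.device("cpu")  # weights parked on CPU between requests

loras_list = ["None", "", "None", "None", "None"]

# Per-element check: move only if at least one slot holds a real LoRA.
if hasattr(pipe, "transformer") and any(l not in ("None", "") for l in loras_list):
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    pipe.transformer.to(device)
print(f"transformer on {next(pipe.transformer.parameters()).device}")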
@@ -1707,7 +1707,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
             outputs=[result_images, actual_task_info],
             queue=True,
             show_progress="full",
-        ).success(save_gallery_images, [result_images], [result_images, result_images_files])
+        ).success(save_gallery_images, [result_images], [result_images, result_images_files], queue=False, show_api=False)
 
         with gr.Tab("Danbooru Tags Transformer with WD Tagger", render=True):
             with gr.Column(scale=2):
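The .success() link now passes queue=False and show_api=False, so saving the gallery runs only after a successful generation, skips the queue, and is hidden from the Space's API page. A stripped-down sketch of the same chaining; every component and function name here is a placeholder:

# Placeholder names throughout; the .click()/.success() chaining and the
# queue/show_api/show_progress kwargs are real Gradio event-listener API.
import gradio as gr
import numpy as np

def generate(prompt):
    # stand-in for the diffusion call: one flat gray 64x64 image
    return [np.full((64, 64, 3), 128, dtype=np.uint8)]

def save_gallery(images):
    # stand-in for save_gallery_images: would write files, then patch the UI
    return gr.update()

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    gallery = gr.Gallery(label="Results")
    btn = gr.Button("Generate")
    btn.click(
        generate, [prompt], [gallery],
        queue=True, show_progress="full",
    ).success(  # runs only if generate() raised no error
        save_gallery, [gallery], [gallery],
        queue=False, show_api=False,
    )

demo.launch()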
modutils.py CHANGED
@@ -136,7 +136,7 @@ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
     dt_now = datetime.now(timezone(timedelta(hours=9)))
     basename = dt_now.strftime('%Y%m%d_%H%M%S_')
     i = 1
-    if not images: return images
+    if not images: return images, gr.update(visible=False)
     output_images = []
     output_paths = []
     for image in images:
@@ -153,7 +153,7 @@ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
         output_paths.append(str(newpath))
         output_images.append((str(newpath), str(filename)))
     progress(1, desc="Gallery updated.")
-    return gr.update(value=output_images), gr.update(value=output_paths)
+    return gr.update(value=output_images), gr.update(value=output_paths, visible=True)
 
 
 def download_private_repo(repo_id, dir_path, is_replace):
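Both modutils.py hunks serve one UI behavior: save_gallery_images now always returns a pair, with the second gr.update() toggling the file-list component's visibility, hidden when there is nothing to download (the old single-value early return would not have matched an event wired to two outputs) and shown once saved paths exist. A minimal sketch of that show/hide pattern; gallery, files, and btn are assumed names:

# Assumed component names; gr.update(value=..., visible=...) is real API.
import gradio as gr

def list_downloads(images):
    if not images:
        # nothing saved yet: leave the gallery as-is, keep the list hidden
        return images, gr.update(visible=False)
    # Gallery input values arrive as (path, caption) pairs in Gradio 4.x.
    paths = [item[0] if isinstance(item, (list, tuple)) else item for item in images]
    return gr.update(value=images), gr.update(value=paths, visible=True)

with gr.Blocks() as demo:
    gallery = gr.Gallery(label="Results")
    files = gr.Files(label="Download", visible=False)
    btn = gr.Button("Refresh downloads")
    btn.click(list_downloads, [gallery], [gallery, files], queue=False)

demo.launch()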