Upload 2 files
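This commit surfaces four generation settings as UI controls in app.py (gpu_duration, pag_scale, clip_skip, free_u), threads them through the three event bindings, converts dc.py's infer() to accept them as defaulted keyword parameters, and pins explicit defaults on GuiSD.update_storage_models().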
app.py CHANGED
@@ -76,12 +76,16 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     with gr.Row():
         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+        gpu_duration = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")

     with gr.Row():
         width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
         height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
         guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
         num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
+        pag_scale = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
+        clip_skip = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
+        free_u = gr.Checkbox(value=False, label="FreeU")

     with gr.Row():
         with gr.Column(scale=4):
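The three quality toggles added here correspond to features that also exist in stock diffusers: FreeU rescales UNet backbone and skip-connection features at no extra inference cost, clip skip takes the text-encoder output one layer early, and PAG (Perturbed-Attention Guidance) perturbs self-attention maps to stabilize structure. This Space drives them through its own pipeline wrapper in dc.py (see below), so the following is only a hedged sketch of the raw-diffusers equivalents; the model id and parameter values are illustrative, not taken from this commit.

# Hedged sketch: raw-diffusers equivalents of the new toggles. Not the
# Space's code path; model id and values below are illustrative only.
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",    # illustrative model id
    torch_dtype=torch.float16,
    enable_pag=True,                               # Perturbed-Attention Guidance
).to("cuda")
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)  # what the FreeU checkbox toggles

image = pipe(
    "an astronaut riding a horse",
    guidance_scale=7.0,
    num_inference_steps=28,
    pag_scale=3.0,    # 0.0, the slider default, disables PAG
    clip_skip=2,      # layer-counting conventions vary between UIs
).images[0]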
@@ -204,7 +208,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                 guidance_scale, num_inference_steps, model_name,
                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-                sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
+                sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
+                clip_skip, pag_scale, free_u, gpu_duration, recom_prompt],
         outputs=[result],
         queue=True,
         show_progress="full",
@@ -217,7 +222,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                 guidance_scale, num_inference_steps, model_name,
                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-                sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
+                sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
+                clip_skip, pag_scale, free_u, gpu_duration, recom_prompt],
         outputs=[result],
         queue=False,
         show_api=True,
@@ -240,7 +246,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                 guidance_scale, num_inference_steps, model_name,
                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-                sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
+                sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
+                clip_skip, pag_scale, free_u, gpu_duration, recom_prompt],
         outputs=[result],
         queue=True,
         show_progress="full",
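All three bindings gain the same two trailing lines because Gradio passes component values to the callback positionally, in inputs order, so each list must stay aligned element for element with infer()'s parameter list in dc.py. A minimal, self-contained sketch of that mechanism (hypothetical callback, not the Space's):

# Component values arrive at the callback positionally, in `inputs` order.
import gradio as gr

def run(pag_scale, clip_skip, free_u, gpu_duration):
    return f"pag={pag_scale} clip_skip={clip_skip} free_u={free_u} gpu={gpu_duration}s"

with gr.Blocks() as demo:
    pag_scale = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
    clip_skip = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
    free_u = gr.Checkbox(value=False, label="FreeU")
    gpu_duration = gr.Number(minimum=5, maximum=240, value=59, label="GPU duration (s)")
    out = gr.Textbox(label="Result")
    gr.Button("Run").click(run, inputs=[pag_scale, clip_skip, free_u, gpu_duration], outputs=[out])

demo.launch()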
dc.py CHANGED
@@ -139,7 +139,7 @@ class GuiSD:
         self.last_load = datetime.now()
         self.inventory = []

-    def update_storage_models(self, storage_floor_gb=
+    def update_storage_models(self, storage_floor_gb=32, required_inventory_for_purge=3):
         while get_used_storage_gb() > storage_floor_gb:
             if len(self.inventory) < required_inventory_for_purge:
                 break
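The new signature makes the purge policy explicit: evict cached models while used storage exceeds 32 GB, but only once at least 3 models are in the inventory. get_used_storage_gb() is defined elsewhere in dc.py and is not part of this diff; a plausible stand-in, assuming it measures the app's disk, could wrap shutil.disk_usage:

# Hedged stand-in for get_used_storage_gb(); the real helper and its
# path are not shown in this diff.
import shutil

def get_used_storage_gb(path: str = ".") -> float:
    total, used, free = shutil.disk_usage(path)  # all values in bytes
    return used / (1024 ** 3)                    # bytes -> GiB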
@@ -726,23 +726,21 @@ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,

 #@spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
-
-
-
-
+          model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
+          lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0,
+          sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
+          clip_skip=True, pag_scale=0.0, free_u=False, gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
     MAX_SEED = np.iinfo(np.int32).max

     image_previews = True
     load_lora_cpu = False
     verbose_info = False
-    gpu_duration = 59
     filename_pattern = "model,seed"

     images: list[tuple[PIL.Image.Image, str | None]] = []
     progress(0, desc="Preparing...")

-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
+    if randomize_seed: seed = random.randint(0, MAX_SEED)

     generator = torch.Generator().manual_seed(seed).seed()

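gpu_duration was previously hardcoded to 59 inside infer(); it now arrives from the gr.Number control (or from an API caller) as a defaulted keyword parameter and is forwarded to the pipeline runner. With the @spaces.GPU decorator commented out here, the reservation presumably happens further down the stack; on ZeroGPU Spaces a per-call budget is typically requested with the documented dynamic-duration pattern, roughly:

# Hedged sketch of ZeroGPU dynamic duration; `generate` is a
# hypothetical stand-in, not dc.py's actual runner.
import spaces

def get_duration(*args, gpu_duration=59, **kwargs):
    # The decorator calls this with the same arguments as the wrapped
    # function and reserves that many seconds of GPU time.
    return gpu_duration

@spaces.GPU(duration=get_duration)
def generate(*args, gpu_duration=59, **kwargs):
    ...  # GPU-bound work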
@@ -767,15 +765,15 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     progress(1, desc="Model loaded.")
     progress(0, desc="Starting Inference...")
     for info_state, stream_images, info_images in sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
-                    guidance_scale,
+                    guidance_scale, clip_skip, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
                     lora4, lora4_wt, lora5, lora5_wt, sampler, schedule_type, schedule_prediction_type,
                     height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
                     None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
                     1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
                     False, True, 1, True, False, image_previews, False, False, filename_pattern, "./images", False, False, False, True, 1, 0.55,
-                    False,
+                    False, free_u, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
                     False, "", "", 0.35, True, True, False, 4, 4, 32,
-                    True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7,
+                    True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, pag_scale,
                     load_lora_cpu, verbose_info, gpu_duration
     ):
         images = stream_images if isinstance(stream_images, list) else images
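sd_gen_generate_pipeline() takes on the order of sixty positional arguments, so each value spliced in here (clip_skip, generator, free_u, pag_scale) shifts everything after it; an off-by-one would silently bind wrong settings rather than raise. A cheap guard, stdlib-only and hypothetical (not part of this repo), is to check the count against the callee's signature:

# Hypothetical guard for long positional calls; assumes the callee
# declares no *args.
import inspect

def check_positional_args(fn, args: tuple) -> None:
    params = [p for p in inspect.signature(fn).parameters.values()
              if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]
    required = [p for p in params if p.default is inspect.Parameter.empty]
    if not len(required) <= len(args) <= len(params):
        raise TypeError(f"{fn.__name__}: got {len(args)} positional args, "
                        f"expected {len(required)}..{len(params)}")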
@@ -787,10 +785,10 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance

 #@spaces.GPU
 def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
-
-
-
-
+           model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
+           lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0,
+           sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
+           clip_skip=True, pag_scale=0.0, free_u=False, gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
     return gr.update()


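Because app.py registers one of the bindings with show_api=True and every new infer() parameter now has a default, the endpoint stays callable remotely. A hedged sketch with gradio_client follows; the Space id and api_name are assumptions (recent client releases accept keyword arguments and fill defaulted parameters; the real names are listed on the Space's "Use via API" page):

# Hedged sketch of a remote call; "user/space-name" and api_name="/infer"
# are assumptions, not taken from this diff.
from gradio_client import Client

client = Client("user/space-name")
result = client.predict(
    prompt="1girl, solo, smile",
    negative_prompt="lowres, bad anatomy",
    seed=0,
    randomize_seed=True,
    width=1024,
    height=1024,
    guidance_scale=7.0,
    num_inference_steps=28,
    api_name="/infer",
)
print(result)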