John6666 committed
Commit 7a3899b · verified · 1 Parent(s): d321de7

Upload 2 files

Files changed (2):
  1. app.py +15 -14
  2. requirements.txt +1 -1
app.py CHANGED
@@ -344,9 +344,8 @@ class GuiSD:
             vae_model=None,
             type_model_precision=torch.float16,
             retain_task_model_in_cache=False,
-            #device="cpu",
+            device="cpu",
         )
-        self.model.device = torch.device("cpu") #
 
     def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
         progress(0, desc="Start inference...")
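For context on this first hunk: the old code built the model and then overwrote `self.model.device` after the fact, while the new code passes `device="cpu"` straight into the constructor. A minimal sketch of why that ordering matters, using an illustrative class rather than stablepy's actual API:

    import torch

    # Illustrative only (FakeLoader is not stablepy's API): a device passed to
    # the constructor is honored while the object is being built, whereas
    # assigning .device afterward only mutates an attribute on a finished object.
    class FakeLoader:
        def __init__(self, precision=torch.float16, device="cpu"):
            self.device = torch.device(device)
            # Work done in __init__ (allocating weights, picking a generator,
            # moving submodules) already uses self.device at this point.
            self.weights = torch.zeros(4, dtype=precision, device=self.device)

    model = FakeLoader(device="cpu")      # new approach: set up front
    # model.device = torch.device("cpu")  # old approach: too late for __init__
    print(model.weights.device)           # cpu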
@@ -488,9 +487,9 @@ class GuiSD:
         model_ip2,
         mode_ip2,
         scale_ip2,
-        progress=gr.Progress(track_tqdm=True),
+        #progress=gr.Progress(track_tqdm=True),
     ):
-        progress(0, desc="Preparing inference...")
+        #progress(0, desc="Preparing inference...")
 
         vae_model = vae_model if vae_model != "None" else None
         loras_list = [lora1, lora2, lora3, lora4, lora5]
@@ -713,15 +712,17 @@ class GuiSD:
         # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
         self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
 
-        progress(0, desc="Preparation completed. Starting inference...")
+        #progress(0, desc="Preparation completed. Starting inference...")
         info_state = f"PROCESSING "
-        info_state += ">"
-        info_state = f"COMPLETED. Seeds: {str(seed)}"
-        if vae_msg:
-            info_state = info_state + "<br>" + vae_msg
-        if msg_lora:
-            info_state = info_state + "<br>" + "<br>".join(msg_lora)
-        yield self.infer_short(self.model, pipe_params, progress), info_state
+        for img, seed, data in self.model(**pipe_params):
+            info_state += ">"
+            if data:
+                info_state = f"COMPLETED. Seeds: {str(seed)}"
+                if vae_msg:
+                    info_state = info_state + "<br>" + vae_msg
+                if msg_lora:
+                    info_state = info_state + "<br>" + "<br>".join(msg_lora)
+            yield img, info_state
 
 sd_gen = GuiSD()
 
@@ -1165,7 +1166,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
 
         with gr.Accordion("Other settings", open=False, visible=True) as menu_other:
             with gr.Row():
-                image_previews_gui = gr.Checkbox(value=False, label="Image Previews")
+                image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
                 hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
                 hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
                 generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
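The two preceding hunks belong together: the handler goes back to being a true generator (the restored `for ... in self.model(**pipe_params): ... yield img, info_state` loop), and the "Image Previews" checkbox now defaults to True so the streamed intermediates are actually shown. The `gr.Progress(track_tqdm=True)` parameter and the `progress(...)` calls are commented out in the same commit, presumably because per-step yields already surface progress in the UI. A minimal, self-contained sketch of the streaming pattern (the fake model below stands in for `self.model(**pipe_params)`; only the generator mechanics mirror app.py):

    import time
    import gradio as gr

    # Stand-in for self.model(**pipe_params): yields (image, seed, data) per step.
    def fake_model(steps):
        for i in range(steps):
            time.sleep(0.2)                      # one "denoising step"
            final = (i == steps - 1)
            yield f"preview {i + 1}/{steps}", 42, final

    def generate(steps):
        info_state = "PROCESSING "
        for img, seed, data in fake_model(int(steps)):
            info_state += ">"
            if data:
                info_state = f"COMPLETED. Seeds: {seed}"
            # Each yield immediately pushes the intermediate result to the UI.
            yield img, info_state

    with gr.Blocks() as demo:
        steps = gr.Slider(1, 20, value=5, step=1, label="Steps")
        preview = gr.Textbox(label="Preview")
        status = gr.Markdown()
        gr.Button("Run").click(generate, inputs=steps, outputs=[preview, status])

    demo.queue().launch()  # the queue is what lets generator handlers stream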
@@ -1646,7 +1647,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt_gui])
 
     gr.LoginButton()
-    gr.DuplicateButton(value="Duplicate Space for private use. (This demo does not work on CPU. Requires GPU Space.)")
+    gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
 
 app.queue()
 app.launch() #show_error=True, debug=True
 
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-git+https://github.com/R3gm/stablepy.git@dev2
+git+https://github.com/R3gm/stablepy.git
 torch==2.2.0
 gdown
 opencv-python
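A note on the requirements change: dropping the `@dev2` ref means pip now installs stablepy from the repository's default branch, so the Space tracks whatever that branch currently contains. If build reproducibility matters more than freshness, pinning to a specific revision, e.g. `git+https://github.com/R3gm/stablepy.git@<commit-sha>` (the SHA is a placeholder here, not a real revision), would be the stricter alternative.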