John6666 committed on
Commit
020ca85
β€’
1 Parent(s): cac72ef

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +2 -2
  2. genimage.py +6 -3
app.py CHANGED
@@ -104,8 +104,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
104
  queue=True,
105
  show_progress="full",
106
  trigger_mode="once",
107
- ).success(dolphin_parse, [chatbot, state], [output_text, copy_btn, copy_btn_pony]).success(
108
- convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False,
109
  ).success(insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False,
110
  ).success(insert_recom_prompt, [output_text_pony, dummy_np_pony, recom_pony], [output_text_pony, dummy_np_pony], queue=False)
111
  chat_clear.click(lambda: None, None, chatbot, queue=False)
 
104
  queue=True,
105
  show_progress="full",
106
  trigger_mode="once",
107
+ ).success(dolphin_parse, [chatbot, state], [output_text, copy_btn, copy_btn_pony]
108
+ ).success(convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False,
109
  ).success(insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False,
110
  ).success(insert_recom_prompt, [output_text_pony, dummy_np_pony, recom_pony], [output_text_pony, dummy_np_pony], queue=False)
111
  chat_clear.click(lambda: None, None, chatbot, queue=False)
genimage.py CHANGED
@@ -2,16 +2,18 @@ import spaces
2
  import torch
3
 
4
 
 
 
 
5
  def load_pipeline():
6
  from diffusers import DiffusionPipeline
7
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
8
  pipe = DiffusionPipeline.from_pretrained(
9
  "John6666/rae-diffusion-xl-v2-sdxl-spo-pcm",
10
  custom_pipeline="lpw_stable_diffusion_xl",
11
  #custom_pipeline="nyanko7/sdxl_smoothed_energy_guidance",
12
  torch_dtype=torch.float16,
13
  )
14
- pipe.to(device)
15
  return pipe
16
 
17
 
@@ -65,6 +67,7 @@ pipe = load_pipeline()
65
  @torch.inference_mode()
66
  @spaces.GPU
67
  def generate_image(prompt, neg_prompt):
 
68
  prompt += ", anime, masterpiece, best quality, very aesthetic, absurdres"
69
  neg_prompt += ", bad hands, bad feet, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], photo, deformed, disfigured, low contrast, photo, deformed, disfigured, low contrast"
70
  metadata = {
@@ -85,7 +88,7 @@ def generate_image(prompt, neg_prompt):
85
  guidance_scale=7.0,# seg_scale=3.0, seg_applied_layers=["mid"],
86
  num_inference_steps=28,
87
  output_type="pil",
88
- clip_skip=1,
89
  ).images
90
  if images:
91
  image_paths = [
 
2
  import torch
3
 
4
 
5
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
6
+
7
+
8
  def load_pipeline():
9
  from diffusers import DiffusionPipeline
 
10
  pipe = DiffusionPipeline.from_pretrained(
11
  "John6666/rae-diffusion-xl-v2-sdxl-spo-pcm",
12
  custom_pipeline="lpw_stable_diffusion_xl",
13
  #custom_pipeline="nyanko7/sdxl_smoothed_energy_guidance",
14
  torch_dtype=torch.float16,
15
  )
16
+ pipe.to("cpu")
17
  return pipe
18
 
19
 
 
67
  @torch.inference_mode()
68
  @spaces.GPU
69
  def generate_image(prompt, neg_prompt):
70
+ pipe.to(device)
71
  prompt += ", anime, masterpiece, best quality, very aesthetic, absurdres"
72
  neg_prompt += ", bad hands, bad feet, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], photo, deformed, disfigured, low contrast, photo, deformed, disfigured, low contrast"
73
  metadata = {
 
88
  guidance_scale=7.0,# seg_scale=3.0, seg_applied_layers=["mid"],
89
  num_inference_steps=28,
90
  output_type="pil",
91
+ clip_skip=2,
92
  ).images
93
  if images:
94
  image_paths = [