1inkusFace committed (verified)
Commit 401cafb · 1 Parent(s): 72e76e5

Update app.py

Files changed (1):
  1. app.py  +71 −11
app.py CHANGED

@@ -17,7 +17,7 @@ from typing import Tuple
 import paramiko
 import datetime
 #import diffusers
-from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, EulerAncestralDiscreteScheduler
+from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, UNet2DConditionModel, AutoencoderKL, EulerAncestralDiscreteScheduler
 from diffusers.models.attention_processor import Attention, AttnProcessor2_0
 from transformers import CLIPTextModelWithProjection, CLIPTextModel, CLIPTokenizer
 
@@ -105,6 +105,7 @@ def load_and_prepare_model():
         unet=unet,
         vae=None,
     )
+
     #pipe.scheduler=scheduler
     #pipe.tokenizer=tokenizer_1
     #pipe.tokenizer_2=tokenizer_2
@@ -117,13 +118,24 @@ def load_and_prepare_model():
     pipe.watermark=None
     pipe.safety_checker=None
     #pipe.unet.to(memory_format=torch.channels_last)
-    pipe.to(device=device, dtype=torch.bfloat16)
+    pipe.to(device=device)
     pipe.vae = vaeXL.to(device) #.to('cpu') #.to(torch.bfloat16)
     pipe.vae.set_default_attn_processor()
     return pipe
 
 pipe = load_and_prepare_model()
 
+sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/stable-diffusion-xl-refiner-1.0-bf16', subfolder='scheduler',beta_schedule="scaled_linear", token=HF_TOKEN)
+
+refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
+    "ford442/stable-diffusion-xl-refiner-1.0-bf16",
+    requires_aesthetics_score=True,
+    token=HF_TOKEN
+)
+
+refiner.vae=vaeX
+refiner.to(device=device)
+
 MAX_SEED = np.iinfo(np.int32).max
 
 neg_prompt_2 = " 'non-photorealistic':1.5, 'unrealistic skin','unattractive face':1.3, 'low quality':1.1, ('dull color scheme', 'dull colors', 'digital noise':1.2),'amateurish', 'poorly drawn face':1.3, 'poorly drawn', 'distorted face', 'low resolution', 'simplistic' "
@@ -196,15 +208,31 @@ def generate_30(
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
         "generator": generator,
-        "output_type": "pil",
+        "output_type": "latent",
+        "denoising_end": 0.75,
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-    rv_image = pipe(**batch_options).images[0]
+    rv_image = pipe(**batch_options).images
+    options = {
+        "prompt": [prompt],
+        "image": rv_image,
+        "negative_prompt": [negative_prompt],
+        "negative_prompt_2": [neg_prompt_2],
+        "width": width,
+        "height": height,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "generator": generator,
+        "output_type": "latent",
+        "denoising_start": 0.75,
+    }
+    batch_options = options.copy()
+    rv_image = refiner(**batch_options).images[0]
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     sd_image_path = f"rv50_G_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
@@ -238,15 +266,31 @@ def generate_60(
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
         "generator": generator,
-        "output_type": "pil",
+        "output_type": "latent",
+        "denoising_end": 0.75,
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-    rv_image = pipe(**batch_options).images[0]
+    rv_image = pipe(**batch_options).images
+    options = {
+        "prompt": [prompt],
+        "image": rv_image,
+        "negative_prompt": [negative_prompt],
+        "negative_prompt_2": [neg_prompt_2],
+        "width": width,
+        "height": height,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "generator": generator,
+        "output_type": "latent",
+        "denoising_start": 0.75,
+    }
+    batch_options = options.copy()
+    rv_image = refiner(**batch_options).images[0]
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     sd_image_path = f"rv50_G_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
@@ -280,15 +324,31 @@ def generate_90(
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
         "generator": generator,
-        "output_type": "pil",
+        "output_type": "latent",
+        "denoising_end": 0.75,
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-    rv_image = pipe(**batch_options).images[0]
+    rv_image = pipe(**batch_options).images
+    options = {
+        "prompt": [prompt],
+        "image": rv_image,
+        "negative_prompt": [negative_prompt],
+        "negative_prompt_2": [neg_prompt_2],
+        "width": width,
+        "height": height,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "generator": generator,
+        "output_type": "latent",
+        "denoising_start": 0.75,
+    }
+    batch_options = options.copy()
+    rv_image = refiner(**batch_options).images[0]
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     sd_image_path = f"rv50_G_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
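
The substance of this commit is a two-stage SDXL generation: the base pipeline now stops at denoising_end=0.75 and returns latents instead of PIL images, and a newly loaded StableDiffusionXLImg2ImgPipeline refiner resumes from denoising_start=0.75 on those latents. Below is a minimal, self-contained sketch of that handoff using the stock diffusers API. The 0.75 split and the latent handoff mirror the diff; the model IDs (official stabilityai repos rather than the ford442 bf16 mirrors), the prompt, dtype, and step count are illustrative assumptions, not taken from app.py.

import torch
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Base pipeline (official repo used here for illustration).
base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16
).to(device)

# Refiner shares the second text encoder and VAE with the base to save memory.
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    torch_dtype=torch.bfloat16,
).to(device)

prompt = "a photo of a lighthouse at dusk"  # illustrative prompt
split = 0.75  # same fraction as denoising_end / denoising_start in the diff

# Stage 1: the base handles the first 75% of the noise schedule and
# returns latents rather than decoded images.
latents = base(
    prompt=prompt,
    num_inference_steps=40,
    denoising_end=split,
    output_type="latent",
).images

# Stage 2: the refiner finishes the remaining 25% and decodes the result.
image = refiner(
    prompt=prompt,
    num_inference_steps=40,
    denoising_start=split,
    image=latents,
).images[0]

image.save("sdxl_base_plus_refiner.png")

In the diff, the refiner call also passes output_type="latent" before rv_image.save(...); the sketch above leaves output_type at its default so the refiner returns a PIL image that can be saved directly.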
 
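
The diff also loads an EulerAncestralDiscreteScheduler from the refiner repo's scheduler subfolder into sched, with beta_schedule overridden to "scaled_linear". The sketch below shows the usual way such a scheduler is attached to a diffusers pipeline; the refiner.scheduler assignment is an assumption about intent (the diff itself only creates sched), and the official stabilityai repo stands in for the token-gated ford442 mirror.

import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLImg2ImgPipeline

# Load the refiner (public repo used for illustration; the diff loads
# ford442/stable-diffusion-xl-refiner-1.0-bf16 with an HF token).
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.bfloat16,
    requires_aesthetics_score=True,
)

# Build the scheduler from the repo's scheduler config, overriding
# beta_schedule as the diff does, then swap it into the pipeline.
# The swap itself is an assumed step, not something the diff performs.
refiner.scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    subfolder="scheduler",
    beta_schedule="scaled_linear",
)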