IbarakiDouji committed
Commit 37f6f65 · 1 Parent(s): 66aee95

feat: multi version

Files changed (3):
  1. app.py (+24 −5)
  2. config.py (+1 −1)
  3. config.toml (+20 −1)
app.py CHANGED
@@ -13,7 +13,7 @@ from datetime import datetime
 from diffusers.models import AutoencoderKL
 from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
 from config import (
-    MODEL,
+    MODELS,
     MIN_IMAGE_SIZE,
     MAX_IMAGE_SIZE,
     USE_TORCH_COMPILE,
@@ -90,6 +90,7 @@ def generate(
     guidance_scale: float = 6.0,
     num_inference_steps: int = 25,
     sampler: str = "Euler a",
+    model_name: str = "v14",
     aspect_ratio_selector: str = DEFAULT_ASPECT_RATIO,
     use_upscaler: bool = False,
     upscaler_strength: float = 0.55,
@@ -133,6 +134,7 @@ def generate(
     width, height = utils.preprocess_image_dimensions(width, height)
 
     # Set up pipeline
+    pipe = pipes[model_name]
     backup_scheduler = pipe.scheduler
     pipe.scheduler = utils.get_scheduler(pipe.scheduler.config, sampler)
 
@@ -148,7 +150,7 @@ def generate(
         "num_inference_steps": num_inference_steps,
         "seed": seed,
         "sampler": sampler,
-        "Model": "WAI NSFW illustrious SDXL v14",
+        "Model": "WAI NSFW illustrious SDXL " + model_name,
         "Model hash": "BDB59BAC77D94AE7A55FF893170F9554C3F349E48A1B73C0C17C0B7C6F4D41A2",
     }
 
@@ -238,17 +240,26 @@ def generate(
 
 # Model initialization
 if torch.cuda.is_available():
+    pipes = {}
+
     try:
         logger.info("Loading VAE and pipeline...")
         vae = AutoencoderKL.from_pretrained(
             "madebyollin/sdxl-vae-fp16-fix",
             torch_dtype=torch.float16,
         )
-        pipe = utils.load_pipeline(MODEL, device, vae=vae)
+
+        for model in MODELS:
+            pipes[model['name']] = utils.load_pipeline(model['path'], device, vae=vae)
+            logger.info(f"Pipeline for {model['name']} loaded successfully on GPU!")
         logger.info("Pipeline loaded successfully on GPU!")
     except Exception as e:
         logger.error(f"Error loading VAE, falling back to default: {e}")
-        pipe = utils.load_pipeline(MODEL, device)
+
+        for model in MODELS:
+            if model['name'] not in pipes:
+                pipes[model['name']] = utils.load_pipeline(model['path'], device)
+                logger.info(f"Pipeline for {model['name']} loaded successfully on GPU!")
 else:
     logger.warning("CUDA not available, running on CPU")
     # check if os.environ keys have VSCODE, if not, load the model on CPU
@@ -260,7 +271,7 @@ else:
 
     if not skip:
         logger.info("Loading pipeline on CPU...")
-        pipe = utils.load_pipeline(MODEL, torch.device("cpu"))
+        pipe = utils.load_pipeline(MODELS[0]['path'], torch.device("cpu"))
         logger.info("Pipeline loaded successfully on CPU!")
 
 
@@ -296,6 +307,13 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme_5") as demo:
             value=True,
             info="Add quality-enhancing tags to your prompt automatically.",
         )
+        model_name = gr.Radio(
+            label="Model",
+            choices=[model['name'] for model in MODELS],
+            value=MODELS[0]['name'],
+            container=True,
+            info="Select the model to use for image generation.",
+        )
         with gr.Accordion(label="More Settings", open=False):
             with gr.Group():
                 aspect_ratio_selector = gr.Radio(
@@ -454,6 +472,7 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme_5") as demo:
         guidance_scale,
         num_inference_steps,
         sampler,
+        model_name,
         aspect_ratio_selector,
         use_upscaler,
         upscaler_strength,
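
The pattern this commit introduces is a name-to-pipeline registry: every entry in MODELS is loaded once at startup, and generate() picks a pipeline per request instead of reloading weights. A minimal sketch of that dispatch, with utils.load_pipeline stubbed out (the stub and the two-entry MODELS list here are illustrative assumptions, not the repo's actual code):

# Sketch: build the registry once, look it up per request.
MODELS = [
    {"name": "v14", "path": "WAI-NSFW-illustrious-SDXL-v14.safetensors"},
    {"name": "v13", "path": "WAI-NSFW-illustrious-SDXL-v13.safetensors"},
]

def load_pipeline(path, device, vae=None):
    # Stand-in for utils.load_pipeline, which builds a
    # StableDiffusionXLPipeline from the checkpoint at `path`.
    return f"<pipeline:{path}@{device}>"

pipes = {m["name"]: load_pipeline(m["path"], "cuda") for m in MODELS}

def generate(prompt: str, model_name: str = "v14"):
    pipe = pipes[model_name]  # O(1) selection, no weight reload
    return pipe, prompt

The trade-off of this design is memory: all five SDXL checkpoints stay resident at once, which is the price paid for instant switching in the UI.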
config.py CHANGED
@@ -26,7 +26,7 @@ config = load_config()
 css = load_css()
 
 # Export variables for backward compatibility
-MODEL = os.getenv("MODEL", config['model']['path'])
+MODELS = os.getenv("MODELS", config['models'])
 MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", config['model']['min_image_size']))
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", config['model']['max_image_size']))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", str(config['model']['use_torch_compile'])).lower() == "true"
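
One caveat in the new default: os.getenv returns a string whenever the MODELS variable is set, while config['models'] is a list of tables, so the two branches produce different types. A hedged sketch of how an environment override could be normalized — the JSON encoding is an assumption of this sketch, not something the commit defines:

import json
import os

raw = os.getenv("MODELS")
if raw:
    # Assumption: the variable, if used, would hold a JSON array such as
    # '[{"name": "v14", "path": "..."}]'.
    MODELS = json.loads(raw)
else:
    # `config` comes from load_config() earlier in config.py.
    MODELS = config['models']  # list of dicts parsed from config.toml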
config.toml CHANGED
@@ -1,11 +1,30 @@
 [model]
-path = "https://huggingface.co/IbarakiDouji/WAI-NSFW-illustrious-SDXL/blob/main/WAI-NSFW-illustrious-SDXL-v14.safetensors"
 min_image_size = 512
 max_image_size = 2880
 use_torch_compile = false
 enable_cpu_offload = false
 output_dir = "./outputs"
 
+[[models]]
+path = "https://huggingface.co/IbarakiDouji/WAI-NSFW-illustrious-SDXL/blob/main/WAI-NSFW-illustrious-SDXL-v14.safetensors"
+name = "v14"
+
+[[models]]
+path = "https://huggingface.co/IbarakiDouji/WAI-NSFW-illustrious-SDXL/blob/main/WAI-NSFW-illustrious-SDXL-v13.safetensors"
+name = "v13"
+
+[[models]]
+path = "https://huggingface.co/IbarakiDouji/WAI-NSFW-illustrious-SDXL/blob/main/WAI-NSFW-illustrious-SDXL-v12.safetensors"
+name = "v12"
+
+[[models]]
+path = "https://huggingface.co/IbarakiDouji/WAI-NSFW-illustrious-SDXL/blob/main/WAI-NSFW-illustrious-SDXL-v11.safetensors"
+name = "v11"
+
+[[models]]
+path = "https://huggingface.co/IbarakiDouji/WAI-NSFW-illustrious-SDXL/blob/main/WAI-NSFW-illustrious-SDXL-v10.safetensors"
+name = "v10"
+
 [prompts]
 quality_tags = "masterpiece,best quality,amazing quality, {prompt}"
 default_negative = "bad quality,worst quality,worst detail,sketch,censor"
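
Each [[models]] block is a TOML array-of-tables entry, so order is preserved and the file parses into models as a list of dicts — exactly the shape the for model in MODELS loops in app.py iterate over. A quick check with Python's standard-library parser (3.11+):

import tomllib

# Parse config.toml and inspect the [[models]] array of tables.
with open("config.toml", "rb") as f:
    config = tomllib.load(f)

print([m["name"] for m in config["models"]])
# ['v14', 'v13', 'v12', 'v11', 'v10']
print(config["models"][0]["path"])
# https://huggingface.co/IbarakiDouji/WAI-NSFW-illustrious-SDXL/blob/main/WAI-NSFW-illustrious-SDXL-v14.safetensors

Because MODELS[0] is the first [[models]] entry, v14 stays the default both for the Gradio model radio button and for the CPU fallback path.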