Update app.py
app.py
CHANGED
@@ -11,7 +11,7 @@ from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, Auto
from diffusers import DDIMScheduler, EulerAncestralDiscreteScheduler
from controlnet_aux import PidiNetDetector, HEDdetector
from diffusers.utils import load_image
-from huggingface_hub import HfApi
+from huggingface_hub import HfApi, snapshot_download
from pathlib import Path
from PIL import Image, ImageOps
import cv2
@@ -124,41 +124,37 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n + negative

-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler")

-
-
-
-)
-
-
-
-)
-
-
-
-pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
-    "John6666/pony-realism-v21main-sdxl",
-    controlnet=controlnet,
-    vae=vae,
-    torch_dtype=torch.float16,
-    scheduler=eulera_scheduler,
-)
-
-
-pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(
-    "John6666/pony-realism-v21main-sdxl",
-    controlnet=controlnet_canny,
-    vae=vae,
-    safety_checker=None,
-    torch_dtype=torch.float16,
-    scheduler=eulera_scheduler,
-)
-
+# Download the model files
+ckpt_dir_pony = snapshot_download(repo_id="John6666/pony-realism-v21main-sdxl")
+ckpt_dir_cyber = snapshot_download(repo_id="John6666/cyberrealistic-pony-v61-sdxl")
+ckpt_dir_stallion = snapshot_download(repo_id="John6666/stallion-dreams-pony-realistic-v1-sdxl")
+
+# Load the models
+vae_pony = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir_pony, "vae"), torch_dtype=torch.float16)
+vae_cyber = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir_cyber, "vae"), torch_dtype=torch.float16)
+vae_stallion = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir_stallion, "vae"), torch_dtype=torch.float16)
+
+controlnet_pony = ControlNetModel.from_pretrained("xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16)
+controlnet_cyber = ControlNetModel.from_pretrained("xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16)
+controlnet_stallion = ControlNetModel.from_pretrained("xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16)
+
+pipe_pony = StableDiffusionXLControlNetPipeline.from_pretrained(
+    ckpt_dir_pony, controlnet=controlnet_pony, vae=vae_pony, torch_dtype=torch.float16, scheduler=eulera_scheduler
+)
+pipe_cyber = StableDiffusionXLControlNetPipeline.from_pretrained(
+    ckpt_dir_cyber, controlnet=controlnet_cyber, vae=vae_cyber, torch_dtype=torch.float16, scheduler=eulera_scheduler
+)
+pipe_stallion = StableDiffusionXLControlNetPipeline.from_pretrained(
+    ckpt_dir_stallion, controlnet=controlnet_stallion, vae=vae_stallion, torch_dtype=torch.float16, scheduler=eulera_scheduler
+)
+
+pipe_pony.to(device)
+pipe_cyber.to(device)
+pipe_stallion.to(device)

MAX_SEED = np.iinfo(np.int32).max
processor = HEDdetector.from_pretrained('lllyasviel/Annotators')
@@ -189,6 +185,7 @@ def run(
    image: dict,
    prompt: str,
    negative_prompt: str,
+    model_choice: str,  # Add this new input
    style_name: str = DEFAULT_STYLE_NAME,
    num_steps: int = 25,
    guidance_scale: float = 5,
@@ -233,8 +230,16 @@ def run(

    generator = torch.Generator(device=device).manual_seed(seed)

+    # Select the appropriate pipe based on the model choice
+    if model_choice == "Pony Realism v21":
+        pipe = pipe_pony
+    elif model_choice == "Cyber Realistic Pony v61":
+        pipe = pipe_cyber
+    else:  # "Stallion Dreams Pony Realistic v1"
+        pipe = pipe_stallion
+
    if use_canny:
-        out =
+        out = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
@@ -273,7 +278,11 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
        with gr.Group():
            image = gr.ImageEditor(type="pil", label="Sketch your image or upload one", width=512, height=512)
            prompt = gr.Textbox(label="Prompt")
-
+            model_choice = gr.Dropdown(
+                ["Pony Realism v21", "Cyber Realistic Pony v61", "Stallion Dreams Pony Realistic v1"],
+                label="Model Choice",
+                value="Pony Realism v21"
+            )
            use_hed = gr.Checkbox(label="use HED detector", value=False, info="check this box if you upload an image and want to turn it to a sketch")
            use_canny = gr.Checkbox(label="use Canny", value=False, info="check this to use ControlNet canny instead of scribble")
            run_button = gr.Button("Run")
@@ -321,6 +330,7 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
            image,
            prompt,
            negative_prompt,
+            model_choice,  # Add this new input
            style,
            num_steps,
            guidance_scale,
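A note on the loading pattern introduced above: snapshot_download returns the local path of the cached repository, and that path is passed to from_pretrained in place of a Hub id, so each checkpoint is fetched once and then reused from the local cache. A minimal sketch of the same pattern, using the Pony Realism repo from the diff (the ckpt_dir and pipe names are illustrative):

import torch
from huggingface_hub import snapshot_download
from diffusers import StableDiffusionXLPipeline

# Download (or reuse from the local cache) every file in the repo and get its local path.
ckpt_dir = snapshot_download(repo_id="John6666/pony-realism-v21main-sdxl")

# from_pretrained accepts a local directory exactly like a Hub id.
pipe = StableDiffusionXLPipeline.from_pretrained(ckpt_dir, torch_dtype=torch.float16)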
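The diff loads xinsir/controlnet-union-sdxl-1.0 three times, once per pipeline. Since all three pipelines use the same ControlNet checkpoint, a single instance could in principle be shared between them. The sketch below assumes none of the pipelines modifies the ControlNet weights; the build_pipe helper and shared_controlnet name are hypothetical, not part of app.py:

import os
import torch
from diffusers import AutoencoderKL, ControlNetModel, EulerAncestralDiscreteScheduler, StableDiffusionXLControlNetPipeline
from huggingface_hub import snapshot_download

# One ControlNet instance reused by every pipeline instead of three identical copies.
shared_controlnet = ControlNetModel.from_pretrained(
    "xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16
)
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
)

def build_pipe(repo_id: str) -> StableDiffusionXLControlNetPipeline:
    """Download one checkpoint and build an SDXL ControlNet pipeline around the shared ControlNet."""
    ckpt_dir = snapshot_download(repo_id=repo_id)
    vae = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir, "vae"), torch_dtype=torch.float16)
    return StableDiffusionXLControlNetPipeline.from_pretrained(
        ckpt_dir,
        controlnet=shared_controlnet,
        vae=vae,
        torch_dtype=torch.float16,
        scheduler=scheduler,
    )

pipe_pony = build_pipe("John6666/pony-realism-v21main-sdxl")
pipe_cyber = build_pipe("John6666/cyberrealistic-pony-v61-sdxl")
pipe_stallion = build_pipe("John6666/stallion-dreams-pony-realistic-v1-sdxl")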
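Moving all three SDXL pipelines to the GPU with .to(device) keeps three full sets of weights resident in VRAM even though only one pipeline is used per request. A hedged alternative, not what app.py does, is diffusers' enable_model_cpu_offload(), which parks the weights on the CPU and streams each submodule to the GPU only while it runs, trading some speed for memory:

# Hypothetical alternative to pipe_pony.to(device) / pipe_cyber.to(device) / pipe_stallion.to(device).
# Requires the accelerate package; assumes the three pipelines from the diff are already loaded.
for pipe in (pipe_pony, pipe_cyber, pipe_stallion):
    pipe.enable_model_cpu_offload()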
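Inside run(), the new model_choice string is resolved to a pipeline with an if/elif chain. An equivalent pattern is a lookup table; the PIPES dict and select_pipe helper below are hypothetical names, they assume the module-level pipe_pony, pipe_cyber, and pipe_stallion objects from the diff, and the fallback mirrors the else branch, which selects the Stallion Dreams pipeline:

# Hypothetical mapping from the dropdown labels to the pipelines created at module level.
PIPES = {
    "Pony Realism v21": pipe_pony,
    "Cyber Realistic Pony v61": pipe_cyber,
    "Stallion Dreams Pony Realistic v1": pipe_stallion,
}

def select_pipe(model_choice: str):
    # Unknown labels fall through to the Stallion Dreams pipeline, like the else branch above.
    return PIPES.get(model_choice, pipe_stallion)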
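The UI side of the change is a gr.Dropdown whose value is appended to the run button's inputs list, so Gradio passes it to run() as the new model_choice argument. A self-contained sketch of that wiring, with a stub callback standing in for run():

import gradio as gr

MODEL_CHOICES = [
    "Pony Realism v21",
    "Cyber Realistic Pony v61",
    "Stallion Dreams Pony Realistic v1",
]

def fake_run(prompt: str, model_choice: str) -> str:
    # Stub in place of run(); app.py would select and call the matching pipeline here.
    return f"model={model_choice!r}, prompt={prompt!r}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    model_choice = gr.Dropdown(MODEL_CHOICES, label="Model Choice", value="Pony Realism v21")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")
    # The dropdown sits in the inputs list at the same position as the
    # model_choice parameter in the callback signature.
    run_button.click(fn=fake_run, inputs=[prompt, model_choice], outputs=result)

if __name__ == "__main__":
    demo.launch()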