Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ import sys
 from diffusers.utils import load_image
 from diffusers import EulerDiscreteScheduler, T2IAdapter
 
-from huggingface_hub import snapshot_download, hf_hub_download, login
+from huggingface_hub import snapshot_download, hf_hub_download, hf_hub_url, login
 import gradio as gr
 
 from pipeline_t2i_adapter import PhotoMakerStableDiffusionXLAdapterPipeline
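The only code change in this hunk is importing hf_hub_url alongside the existing hub helpers; the hunks visible in this commit only call hf_hub_download, so the new import appears unused here. For reference, hf_hub_url builds a direct-download URL without fetching anything. A minimal sketch (not committed code), using the repo and file this commit downloads below:

    from huggingface_hub import hf_hub_url

    # Builds the "resolve" (direct download) URL for a file in a model repo;
    # no network request is made.
    url = hf_hub_url(
        repo_id="RunDiffusion/Juggernaut-XI-v11",
        filename="Juggernaut-XI-byRunDiffusion.safetensors",
    )
    # 'https://huggingface.co/RunDiffusion/Juggernaut-XI-v11/resolve/main/Juggernaut-XI-byRunDiffusion.safetensors'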
@@ -25,16 +25,17 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
 login(HF_TOKEN)
 # global variable
 
+# model_id = 'RunDiffusion/Juggernaut-XL-v9'
 # model_id = 'SG161222/RealVisXL_V5.0'
 # model_id = 'Lykon/dreamshaper-xl-lightning'
 # model_id = 'SG161222/RealVisXL_V5.0_Lightning'
+model_file = "https://huggingface.co/RunDiffusion/Juggernaut-XI-v11/blob/main/Juggernaut-XI-byRunDiffusion.safetensors"
 model_id = 'RunDiffusion/Juggernaut-XI-v11'
-base_model_path = Path(model_id)
-os.makedirs(base_model_path, exist_ok=True)
-snapshot_download(repo_id=model_id, local_dir=base_model_path)
+#base_model_path = Path(model_id)
+#os.makedirs(base_model_path, exist_ok=True)
+#snapshot_download(repo_id=model_id, local_dir=base_model_path)
+model_file = hf_hub_download(repo_id=model_id, filename="Juggernaut-XI-byRunDiffusion.safetensors", repo_type="model")
 
-# base_model_path =
-# base_model_path =
 face_detector = FaceAnalysis2(providers=['CPUExecutionProvider', 'CUDAExecutionProvider'], allowed_modules=['detection', 'recognition'])
 face_detector.prepare(ctx_id=0, det_size=(640, 640))
 
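This hunk swaps the full snapshot_download of the repository for a single-file hf_hub_download of the checkpoint. Two small observations: the hardcoded model_file URL is dead code (the hf_hub_download result overwrites it a few lines later), and it is a /blob/ page URL, which points at the web viewer rather than the /resolve/ direct-download endpoint. A trimmed sketch of the surviving logic (repo_type="model" is hf_hub_download's default, so it is omitted here):

    from huggingface_hub import hf_hub_download

    model_id = 'RunDiffusion/Juggernaut-XI-v11'
    # Returns the local cache path of this one checkpoint file instead of
    # mirroring the whole repository as the removed snapshot_download did.
    model_file = hf_hub_download(repo_id=model_id, filename="Juggernaut-XI-byRunDiffusion.safetensors")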
@@ -70,19 +71,34 @@ adapter = T2IAdapter.from_pretrained(
     "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch_dtype, variant="fp16"
 ).to(device)
 
-pipe = PhotoMakerStableDiffusionXLAdapterPipeline.from_pretrained(
+pipe = PhotoMakerStableDiffusionXLAdapterPipeline.from_single_file(
     base_model_path,
     adapter=adapter,
-    torch_dtype=torch_dtype,
-    use_safetensors=True,
-    variant="fp16",
+    torch_dtype=torch.float16,
+    use_safetensors=True
 ).to(device)
 
+# pipe = PhotoMakerStableDiffusionXLAdapterPipeline.from_pretrained(
+#     base_model_path,
+#     adapter=adapter,
+#     torch_dtype=torch_dtype,
+#     use_safetensors=True,
+#     variant="fp16",
+# ).to(device)
+
 pipe.unet = pipe.unet.to(device=device, dtype=torch_dtype)
+pipe.unet.to(memory_format=torch.channels_last)
+pipe.unet.eval()
+
 pipe.text_encoder = pipe.text_encoder.to(device=device, dtype=torch_dtype)
+pipe.text_encoder.eval()
+
 pipe.text_encoder_2 = pipe.text_encoder_2.to(device=device, dtype=torch_dtype)
-pipe.
+pipe.text_encoder_2.eval()
 
+pipe.vae = pipe.vae.to(device=device, dtype=torch_dtype)
+pipe.vae.decode.to(memory_format=torch.channels_last)
+pipe.vae.eval()
 
 pipe.load_photomaker_adapter(
     os.path.dirname(photomaker_ckpt),
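Two things in this hunk would explain the Space's "Runtime error" status. First, the from_single_file call still receives base_model_path, whose only visible definition this same commit comments out, so the module should fail at startup with a NameError; the model_file path returned by hf_hub_download was presumably intended. Second, pipe.vae.decode.to(memory_format=torch.channels_last) targets the bound decode method rather than the VAE module, which raises an AttributeError if reached; the unet line above applies the memory format to the module itself, which is the usual pattern. A corrected sketch of those lines (not the committed code, and assuming the custom PhotoMaker pipeline inherits diffusers' FromSingleFileMixin the way the stock StableDiffusionXLAdapterPipeline does):

    pipe = PhotoMakerStableDiffusionXLAdapterPipeline.from_single_file(
        model_file,                  # the path hf_hub_download returned, not base_model_path
        adapter=adapter,
        torch_dtype=torch.float16,
        use_safetensors=True,
    ).to(device)

    pipe.vae = pipe.vae.to(device=device, dtype=torch_dtype)
    pipe.vae.to(memory_format=torch.channels_last)  # apply to the module, not pipe.vae.decode
    pipe.vae.eval()

The eval() calls and channels_last conversions are inference-time touches; diffusers modules already load in eval mode, so the eval() calls are defensive rather than required.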