Spaces: Running on Zero

Commit · c0b0ee8 · 1 Parent: b58af11

Don't put onnx face encoders on gpu when using hf space

Files changed:
- adaface/adaface_wrapper.py       +3 -1
- adaface/face_id_to_ada_prompt.py +4 -3
- app.py                           +6 -3
- requirements.txt                 +1 -1
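The change is small but cuts across four files: app.py now detects whether it is running on a Hugging Face Space and passes an is_on_hf_space flag into AdaFaceWrapper, which forwards it to the face-ID-to-prompt encoders; those encoders then keep their insightface ONNX detectors on the CPU execution provider whenever the flag is set. A minimal sketch of that decision, with pick_onnx_providers as an illustrative helper (the diff only shows the CPU branch; the CUDA fallback below is an assumption):

    import os

    def pick_onnx_providers(device, is_on_hf_space=None):
        # app.py detects a Space by checking the SPACE_ID environment variable.
        if is_on_hf_space is None:
            is_on_hf_space = os.getenv("SPACE_ID") is not None
        # Mirrors the changed condition in face_id_to_ada_prompt.py: force the
        # CPU provider when running on CPU or inside a Space.
        if str(device) == 'cpu' or is_on_hf_space:
            return ['CPUExecutionProvider']
        # Assumed GPU fallback for local runs; this branch is not shown in the diff.
        return ['CUDAExecutionProvider', 'CPUExecutionProvider']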
adaface/adaface_wrapper.py
CHANGED
@@ -31,7 +31,7 @@ class AdaFaceWrapper(nn.Module):
                  main_unet_filepath=None, unet_types=None, extra_unet_dirpaths=None, unet_weights_in_ensemble=None,
                  enable_static_img_suffix_embs=None, unet_uses_attn_lora=False,
                  attn_lora_layer_names=['q', 'k', 'v', 'out'], normalize_cross_attn=False, q_lora_updates_query=False,
-                 device='cuda', is_training=False):
+                 device='cuda', is_training=False, is_on_hf_space=False):
         '''
         pipeline_name: "text2img", "text2imgxl", "img2img", "text2img3", "flux", or None.
         If None, it's used only as a face encoder, and the unet and vae are
@@ -64,6 +64,7 @@ class AdaFaceWrapper(nn.Module):
         self.unet_weights_in_ensemble = unet_weights_in_ensemble
         self.device = device
         self.is_training = is_training
+        self.is_on_hf_space = is_on_hf_space
 
         if negative_prompt is None:
             self.negative_prompt = \
@@ -99,6 +100,7 @@ class AdaFaceWrapper(nn.Module):
                                           self.adaface_ckpt_paths,
                                           self.adaface_encoder_cfg_scales,
                                           self.enabled_encoders,
+                                          is_on_hf_space=self.is_on_hf_space,
                                           num_static_img_suffix_embs=4)
 
         self.id2ada_prompt_encoder.to(self.device)
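AdaFaceWrapper itself only accepts the flag, stores it, and forwards it to create_id2ada_prompt_encoder as a keyword argument. A hedged caller-side sketch (checkpoint path and encoder list are placeholders, and it assumes the remaining constructor arguments have defaults as in the signature above):

    from adaface.adaface_wrapper import AdaFaceWrapper

    adaface = AdaFaceWrapper(pipeline_name=None,                   # face-encoder-only mode, per the docstring
                             adaface_encoder_types=['consistentID'],            # placeholder encoder list
                             adaface_ckpt_paths='models/adaface/adaface.ckpt',  # placeholder checkpoint path
                             device='cpu',
                             is_on_hf_space=True)                  # new flag; forwarded to the encoder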
adaface/face_id_to_ada_prompt.py
CHANGED
@@ -26,7 +26,7 @@ def create_id2ada_prompt_encoder(adaface_encoder_types, adaface_ckpt_paths=None,
     if adaface_encoder_type == 'arc2face':
         id2ada_prompt_encoder = \
             Arc2Face_ID2AdaPrompt(adaface_ckpt_path=adaface_ckpt_path,
-                                  …
+                                  *args, **kwargs)
     elif adaface_encoder_type == 'consistentID':
         id2ada_prompt_encoder = \
             ConsistentID_ID2AdaPrompt(pipe=None,
@@ -64,6 +64,7 @@ class FaceID2AdaPrompt(nn.Module):
         # i.e., 6 for arc2face and 1 for consistentID.
         self.out_id_embs_cfg_scale = kwargs.get('out_id_embs_cfg_scale', -1)
         self.is_training = kwargs.get('is_training', False)
+        self.is_on_hf_space = kwargs.get('is_on_hf_space', False)
         # extend_prompt2token_proj_attention_multiplier is an integer >= 1.
         # TODO: extend_prompt2token_proj_attention_multiplier should be a list of integers.
         self.extend_prompt2token_proj_attention_multiplier = kwargs.get('extend_prompt2token_proj_attention_multiplier', 1)
@@ -655,7 +656,7 @@ class Arc2Face_ID2AdaPrompt(FaceID2AdaPrompt):
         if device == self.device:
             return
 
-        if str(device) == 'cpu':
+        if str(device) == 'cpu' or self.is_on_hf_space:
             self.face_app = FaceAnalysis(name='antelopev2', root='models/insightface',
                                          providers=['CPUExecutionProvider'])
             self.face_app.prepare(ctx_id=0, det_size=(512, 512))
@@ -801,7 +802,7 @@ class ConsistentID_ID2AdaPrompt(FaceID2AdaPrompt):
         if device == self.device:
             return
 
-        if str(device) == 'cpu':
+        if str(device) == 'cpu' or self.is_on_hf_space:
             self.face_app = FaceAnalysis(name='buffalo_l', root='models/insightface',
                                          providers=['CPUExecutionProvider'])
             self.face_app.prepare(ctx_id=0, det_size=(512, 512))
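Both encoders share the same pattern in their device-move method: return early if the device is unchanged, otherwise rebuild the insightface FaceAnalysis app, pinning it to CPUExecutionProvider when on CPU or on a Space. A condensed sketch of that guard, assuming insightface is installed (the class name and the non-CPU branch are illustrative; the diff only shows the CPU branch):

    from insightface.app import FaceAnalysis

    class FaceEncoderSketch:
        def __init__(self, is_on_hf_space=False):
            self.is_on_hf_space = is_on_hf_space   # mirrors kwargs.get('is_on_hf_space', False)
            self.device = None
            self.face_app = None

        def to(self, device):
            if device == self.device:
                return                             # nothing to do; avoid rebuilding the detector
            if str(device) == 'cpu' or self.is_on_hf_space:
                # On a Space the onnx face detector stays on the CPU provider,
                # even if the rest of the encoder moves to GPU.
                self.face_app = FaceAnalysis(name='buffalo_l', root='models/insightface',
                                             providers=['CPUExecutionProvider'])
            else:
                # Assumed GPU path for local, non-Space runs.
                self.face_app = FaceAnalysis(name='buffalo_l', root='models/insightface',
                                             providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
            self.face_app.prepare(ctx_id=0, det_size=(512, 512))
            self.device = device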
app.py
CHANGED
@@ -44,7 +44,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-def …
+def is_running_on_hf_space():
     return os.getenv("SPACE_ID") is not None
 
 from huggingface_hub import snapshot_download
@@ -59,13 +59,15 @@ os.makedirs("/tmp/gradio", exist_ok=True)
 app = FaceAnalysis(name="buffalo_l", root='models/insightface', providers=['CPUExecutionProvider'])
 app.prepare(ctx_id=0, det_size=(320, 320))
 
-if …
+if is_running_on_hf_space():
     device = 'cuda:0'
+    is_on_hf_space = True
 else:
     if args.gpu is None:
         device = "cuda"
     else:
         device = f"cuda:{args.gpu}"
+    is_on_hf_space = False
 
 print(f"Device: {device}")
 
@@ -76,7 +78,8 @@ id_animator = load_model(model_style_type=args.model_style_type, device='cpu')
 adaface = AdaFaceWrapper(pipeline_name="text2img", base_model_path=adaface_base_model_path,
                          adaface_encoder_types=args.adaface_encoder_types,
                          adaface_ckpt_paths=args.adaface_ckpt_path, device='cpu',
-                         num_inference_steps=args.num_inference_steps)
+                         num_inference_steps=args.num_inference_steps,
+                         is_on_hf_space=is_on_hf_space)
 
 basedir = os.getcwd()
 savedir = os.path.join(basedir,'samples')
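The detection itself relies only on the SPACE_ID environment variable that Hugging Face sets inside a Space, so it is easy to exercise locally; the space id below is a placeholder:

    import os

    def is_running_on_hf_space():
        return os.getenv("SPACE_ID") is not None

    os.environ["SPACE_ID"] = "someuser/adaface-demo"   # placeholder; simulate a Space
    assert is_running_on_hf_space()

    del os.environ["SPACE_ID"]                         # back to a local run
    assert not is_running_on_hf_space()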
requirements.txt
CHANGED
@@ -11,7 +11,7 @@ insightface
 omegaconf
 opencv-python
 onnx>=1.16.0
-onnxruntime
+onnxruntime
 safetensors
 spaces
 ftfy