Spaces:
Runtime error
Runtime error
Commit
·
d939c9c
1
Parent(s):
2c9f146
update
Browse files
- app.py +4 -4
- pipelines/pipeline_infu_flux.py +2 -1
app.py
CHANGED
|
@@ -113,17 +113,17 @@ def prepare_pipeline(model_version, enable_realism, enable_anti_blur):
|
|
| 113 |
pipeline.infusenet_sim.cpu()
|
| 114 |
pipeline.image_proj_model_sim.cpu()
|
| 115 |
torch.cuda.empty_cache()
|
| 116 |
-
pipeline.infusenet_aes.to(
|
| 117 |
pipeline.pipe.controlnet = pipeline.infusenet_aes
|
| 118 |
-
pipeline.image_proj_model_aes.to(
|
| 119 |
pipeline.image_proj_model = pipeline.image_proj_model_aes
|
| 120 |
else:
|
| 121 |
pipeline.infusenet_aes.cpu()
|
| 122 |
pipeline.image_proj_model_aes.cpu()
|
| 123 |
torch.cuda.empty_cache()
|
| 124 |
-
pipeline.infusenet_sim.to(
|
| 125 |
pipeline.pipe.controlnet = pipeline.infusenet_sim
|
| 126 |
-
pipeline.image_proj_model_sim.to(
|
| 127 |
pipeline.image_proj_model = pipeline.image_proj_model_sim
|
| 128 |
|
| 129 |
loaded_pipeline_config['pipeline'] = pipeline
|
|
|
|
| 113 |
pipeline.infusenet_sim.cpu()
|
| 114 |
pipeline.image_proj_model_sim.cpu()
|
| 115 |
torch.cuda.empty_cache()
|
| 116 |
+
pipeline.infusenet_aes.to('cuda')
|
| 117 |
pipeline.pipe.controlnet = pipeline.infusenet_aes
|
| 118 |
+
pipeline.image_proj_model_aes.to('cuda')
|
| 119 |
pipeline.image_proj_model = pipeline.image_proj_model_aes
|
| 120 |
else:
|
| 121 |
pipeline.infusenet_aes.cpu()
|
| 122 |
pipeline.image_proj_model_aes.cpu()
|
| 123 |
torch.cuda.empty_cache()
|
| 124 |
+
pipeline.infusenet_sim.to('cuda')
|
| 125 |
pipeline.pipe.controlnet = pipeline.infusenet_sim
|
| 126 |
+
pipeline.image_proj_model_sim.to('cuda')
|
| 127 |
pipeline.image_proj_model = pipeline.image_proj_model_sim
|
| 128 |
|
| 129 |
loaded_pipeline_config['pipeline'] = pipeline
|
pipelines/pipeline_infu_flux.py
CHANGED
|
@@ -309,9 +309,10 @@ class InfUFluxPipeline:
|
|
| 309 |
face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1] # only use the maximum face
|
| 310 |
landmark = face_info['kps']
|
| 311 |
id_embed = extract_arcface_bgr_embedding(id_image_cv2, landmark, self.arcface_model)
|
| 312 |
-
id_embed = id_embed.clone().unsqueeze(0).float()
|
| 313 |
id_embed = id_embed.reshape([1, -1, 512])
|
| 314 |
id_embed = id_embed.to(device='cuda', dtype=torch.bfloat16)
|
|
|
|
| 315 |
with torch.no_grad():
|
| 316 |
id_embed = self.image_proj_model(id_embed)
|
| 317 |
bs_embed, seq_len, _ = id_embed.shape
|
|
|
|
| 309 |
face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1] # only use the maximum face
|
| 310 |
landmark = face_info['kps']
|
| 311 |
id_embed = extract_arcface_bgr_embedding(id_image_cv2, landmark, self.arcface_model)
|
| 312 |
+
id_embed = id_embed.clone().unsqueeze(0).float()
|
| 313 |
id_embed = id_embed.reshape([1, -1, 512])
|
| 314 |
id_embed = id_embed.to(device='cuda', dtype=torch.bfloat16)
|
| 315 |
+
self.image_proj_model.to('cuda', torch.bfloat16)
|
| 316 |
with torch.no_grad():
|
| 317 |
id_embed = self.image_proj_model(id_embed)
|
| 318 |
bs_embed, seq_len, _ = id_embed.shape
|