Spaces status: Runtime error
Commit: make num_inference_steps configurable
Changed file: IP_Composer/perform_swap.py
@@ -25,12 +25,12 @@ def get_embedding_composition(embed, projections_data):
|
|
| 25 |
return combined_embeds
|
| 26 |
|
| 27 |
|
| 28 |
-
def get_modified_images_embeds_composition(embed, projections_data, ip_model, prompt=None, scale=1.0, num_samples=3, seed=420):
|
| 29 |
|
| 30 |
final_embeds = get_embedding_composition(embed, projections_data)
|
| 31 |
clip_embeds = torch.from_numpy(final_embeds)
|
| 32 |
|
| 33 |
-
images = ip_model.generate(clip_image_embeds=clip_embeds, prompt=prompt, num_samples=num_samples, num_inference_steps=50, seed=seed, guidance_scale=7.5, scale=scale)
|
| 34 |
return images
|
| 35 |
|
| 36 |
|
|
|
|
| 25 |
return combined_embeds
|
| 26 |
|
| 27 |
|
| 28 |
+
def get_modified_images_embeds_composition(embed, projections_data, ip_model, prompt=None, scale=1.0, num_samples=3, seed=420, num_inference_steps=50):
    """Generate images from a composed embedding using an IP-Adapter model.

    Composes ``embed`` with ``projections_data`` via
    ``get_embedding_composition``, converts the result to a torch tensor,
    and passes it to ``ip_model.generate`` as the CLIP image embedding.

    Args:
        embed: Base image embedding (numpy array) to be modified.
        projections_data: Projection data consumed by
            ``get_embedding_composition`` to build the combined embedding.
        ip_model: IP-Adapter model object exposing a ``generate()`` method.
        prompt: Optional text prompt forwarded to generation.
        scale: IP-Adapter conditioning scale forwarded to ``generate()``.
        num_samples: Number of images to generate.
        seed: RNG seed forwarded to ``generate()`` for reproducibility.
        num_inference_steps: Number of diffusion denoising steps
            (configurable since this commit; previously hard-coded).

    Returns:
        Whatever ``ip_model.generate`` returns — presumably a list of
        generated PIL images (TODO confirm against the IP-Adapter API).
    """
    # Build the modified embedding, then move it into torch for the model.
    final_embeds = get_embedding_composition(embed, projections_data)
    clip_embeds = torch.from_numpy(final_embeds)

    # NOTE(review): guidance_scale is still hard-coded to 7.5 — consider
    # making it configurable the same way num_inference_steps now is.
    images = ip_model.generate(clip_image_embeds=clip_embeds, prompt=prompt, num_samples=num_samples, num_inference_steps=num_inference_steps, seed=seed, guidance_scale=7.5, scale=scale)
    return images
|
| 35 |
|
| 36 |
|