Spaces: Running on Zero
add option for new user provided concepts #4
by linoyts (HF Staff) - opened
app.py CHANGED
@@ -10,6 +10,7 @@ import open_clip
 from huggingface_hub import hf_hub_download
 from IP_Composer.IP_Adapter.ip_adapter import IPAdapterXL
 from IP_Composer.perform_swap import compute_dataset_embeds_svd, get_modified_images_embeds_composition
+from IP_Composer.generate_text_embeddings import load_descriptions, generate_embeddings
 import spaces
 import random
 
@@ -32,6 +33,8 @@ ip_model = IPAdapterXL(pipe, image_encoder_repo, image_encoder_subfolder, ip_ckp
 # Initialize CLIP model
 clip_model, _, preprocess = open_clip.create_model_and_transforms('hf-hub:laion/CLIP-ViT-H-14-laion2B-s32B-b79K')
 clip_model.to(device)
+tokenizer = open_clip.get_tokenizer('hf-hub:laion/CLIP-ViT-H-14-laion2B-s32B-b79K')
+
 
 CONCEPTS_MAP={
     "age": "age_descriptions.npy",
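For context on how the new tokenizer is meant to be used: below is a minimal sketch of turning a list of concept descriptions into a stacked array of CLIP text embeddings with open_clip. The helper name `encode_descriptions` and the normalization step are illustrative assumptions; the PR's actual implementation lives in `IP_Composer.generate_text_embeddings.generate_embeddings`.

```python
import numpy as np
import torch

# Illustrative sketch only: how the CLIP text tower and the tokenizer added above
# can turn concept descriptions into a (num_descriptions, embed_dim) array.
# `encode_descriptions` is a hypothetical helper, not part of the PR.
def encode_descriptions(descriptions, clip_model, tokenizer, device, batch_size=100):
    chunks = []
    with torch.no_grad():
        for i in range(0, len(descriptions), batch_size):
            tokens = tokenizer(descriptions[i:i + batch_size]).to(device)
            feats = clip_model.encode_text(tokens)
            feats = feats / feats.norm(dim=-1, keepdim=True)  # unit-normalize, as is typical for CLIP
            chunks.append(feats.cpu().numpy())
    return np.concatenate(chunks, axis=0)
```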
@@ -120,6 +123,12 @@ def process_images(
     scale=1.0,
     seed=420,
     num_inference_steps=50,
+    concpet_from_file_1 = None,
+    concpet_from_file_2 = None,
+    concpet_from_file_3 = None,
+    use_concpet_from_file_1 = False,
+    use_concpet_from_file_2 = False,
+    use_concpet_from_file_3 = False
 ):
     """Process the base image and concept images to generate modified images"""
     # Process base image
@@ -129,23 +138,37 @@
     # Process concept images
     concept_images = []
     concept_descriptions = []
+
+    skip_load_concept = [False, False, False]
 
     # for demo purposes we allow for up to 3 different concepts and corresponding concept images
     if concept_image1 is not None:
         concept_images.append(concept_image1)
-
+        if use_concpet_from_file_1 and concpet_from_file_1: # if concept is new from user input
+            concept_descriptions.append(concpet_from_file_1)
+            skip_load_concept[0] = True
+        else:
+            concept_descriptions.append(CONCEPTS_MAP[concept_name1])
     else:
         return None, "Please upload at least one concept image"
 
     # Add second concept (optional)
     if concept_image2 is not None:
         concept_images.append(concept_image2)
-
+        if use_concpet_from_file_2 and concpet_from_file_2: # if concept is new from user input
+            concept_descriptions.append(concpet_from_file_2)
+            skip_load_concept[1] = True
+        else:
+            concept_descriptions.append(CONCEPTS_MAP[concept_name2])
 
     # Add third concept (optional)
     if concept_image3 is not None:
         concept_images.append(concept_image3)
-
+        if use_concpet_from_file_3 and concpet_from_file_3: # if concept is new from user input
+            concept_descriptions.append(concpet_from_file_3)
+            skip_load_concept[2] = True
+        else:
+            concept_descriptions.append(CONCEPTS_MAP[concept_name3])
 
     # Get all ranks
     ranks = [rank1]
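The three per-concept branches above follow the same pattern: if the user uploaded a variations file, its precomputed embeddings (held in `gr.State`) are used and `skip_load_concept` is set; otherwise the predefined `.npy` file name from `CONCEPTS_MAP` is recorded. A hedged sketch of the same selection logic written as a loop; the helper below is hypothetical and not part of the PR:

```python
# Hypothetical refactor of the branching above; names mirror the diff,
# the helper itself is not part of the PR.
def collect_concepts(images, names, file_embeds, use_file_flags, concepts_map):
    concept_images, concept_descriptions, skip_load_concept = [], [], []
    for image, name, embeds, use_file in zip(images, names, file_embeds, use_file_flags):
        if image is None:
            continue
        concept_images.append(image)
        if use_file and embeds is not None:
            # user-provided concept: embeddings already computed on upload
            concept_descriptions.append(embeds)
            skip_load_concept.append(True)
        else:
            # predefined concept: remember which .npy file to load later
            concept_descriptions.append(concepts_map[name])
            skip_load_concept.append(False)
    return concept_images, concept_descriptions, skip_load_concept
```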
@@ -159,12 +182,15 @@ def process_images(
     projection_matrices = []
     # for the demo, we assume 1 concept image per concept
     # for each concept image, we calculate its image embeddings and load the concept's textual embeddings to compute the projection matrix over it
-    for i,
+    for i, concept in enumerate(concept_descriptions):
         img_pil = Image.fromarray(concept_images[i]).convert("RGB")
         concept_embeds.append(get_image_embeds(img_pil, clip_model, preprocess, device))
-
-
-
+        if skip_load_concept[i]: # if concept is new from user input
+            all_embeds_in = concept
+        else:
+            embeds_path = f"./IP_Composer/text_embeddings/{concept}"
+            with open(embeds_path, "rb") as f:
+                all_embeds_in = np.load(f)
 
         projection_matrix = compute_dataset_embeds_svd(all_embeds_in, ranks[i])
         projection_matrices.append(projection_matrix)
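Per concept, the loop ends up with a matrix of text embeddings (either the user-provided ones or the shipped `.npy` file) and hands it to `compute_dataset_embeds_svd` together with a rank. A minimal sketch of the kind of rank-truncated SVD projection that call is assumed to compute; the real implementation is in `IP_Composer.perform_swap`:

```python
import numpy as np

# Assumed behaviour, for illustration only: build a projector onto the subspace
# spanned by the top-`rank` right singular vectors of the concept's text embeddings.
def svd_projection_matrix(text_embeds: np.ndarray, rank: int) -> np.ndarray:
    # text_embeds: (num_descriptions, embed_dim)
    _, _, vh = np.linalg.svd(text_embeds, full_matrices=False)
    v_r = vh[:rank].T          # (embed_dim, rank)
    return v_r @ v_r.T         # (embed_dim, embed_dim) projection matrix
```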
@@ -193,6 +219,13 @@ def process_images(
 
     return modified_images[0]
 
+@spaces.GPU
+def get_text_embeddings(concept_file):
+    descriptions = load_descriptions(concept_file)
+    embeddings = generate_embeddings(descriptions, model, tokenizer, device, batch_size=100)
+    return embeddings, True
+
+
 def process_and_display(
     base_image,
     concept_image1, concept_name1="age",
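`get_text_embeddings` returns a tuple so that a single `upload` event can fill two `gr.State` slots at once: the embeddings and a `True` flag. Assuming the uploaded "concept variations" file is plain text with one description per line, here is a minimal sketch of what `load_descriptions` is expected to produce; the reader below and the example file contents are illustrative assumptions, not the PR's code:

```python
# Hypothetical stand-in for IP_Composer.generate_text_embeddings.load_descriptions,
# assuming one concept description per line in the uploaded text file.
def read_descriptions(path: str) -> list[str]:
    with open(path, "r", encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]

# Example "concept variations" file (e.g. dog_breeds.txt):
#   a photo of a golden retriever
#   a photo of a siberian husky
#   a photo of a dachshund
```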
@@ -236,7 +269,12 @@ following the algorithm proposed in [*IP-Composer: Semantic Composition of Visua
 
 [[project page](https://ip-composer.github.io/IP-Composer/)] [[arxiv](https://arxiv.org/pdf/2502.13951)]
 """)
-
+    concpet_from_file_1 = gr.State()
+    concpet_from_file_2 = gr.State()
+    concpet_from_file_3 = gr.State()
+    use_concpet_from_file_1 = gr.State()
+    use_concpet_from_file_2 = gr.State()
+    use_concpet_from_file_3 = gr.State()
     with gr.Row():
         with gr.Column():
             base_image = gr.Image(label="Base Image (Required)", type="numpy")
@@ -244,15 +282,19 @@ following the algorithm proposed in [*IP-Composer: Semantic Composition of Visua
             with gr.Row():
                 with gr.Group():
                     concept_image1 = gr.Image(label="Concept Image 1", type="numpy")
+                    concept_file_1 = gr.File(label="concept variations", file_types=["text"])
                     concept_name1 = gr.Dropdown(concept_options, label="concept 1", value=None, info="concept type")
 
             with gr.Tab("concept 2 - optional"):
                 with gr.Group():
                     concept_image2 = gr.Image(label="Concept Image 2", type="numpy")
+                    concept_file_2 = gr.File(label="concept variations", file_types=["text"])
                     concept_name2 = gr.Dropdown(concept_options, label="concept 2", value=None, info="concept type")
+
             with gr.Tab("concept 3 - optional"):
                 with gr.Group():
                     concept_image3 = gr.Image(label="Concept Image 3", type="numpy")
+                    concept_file_3 = gr.File(label="concept variations", file_types=["text"])
                     concept_name3 = gr.Dropdown(concept_options, label="concept 3", value= None, info="concept type")
 
 
@@ -284,6 +326,38 @@ following the algorithm proposed in [*IP-Composer: Semantic Composition of Visua
         fn=generate_examples,
         cache_examples=False
     )
+
+    concept_file_1.upload(
+        fn=get_text_embeddings,
+        inputs=[concept_file_1],
+        outputs=[concpet_from_file_1, use_concpet_from_file_1]
+    )
+    concept_file_2.upload(
+        fn=get_text_embeddings,
+        inputs=[concept_file_2],
+        outputs=[concpet_from_file_2, use_concpet_from_file_2]
+    )
+    concept_file_3.upload(
+        fn=get_text_embeddings,
+        inputs=[concept_file_3],
+        outputs=[concpet_from_file_3, use_concpet_from_file_3]
+    )
+
+    concept_file_1.delete(
+        fn=lambda x: False,
+        inputs=[concept_file_1],
+        outputs=[use_concpet_from_file_1]
+    )
+    concept_file_2.delete(
+        fn=lambda x: False,
+        inputs=[concept_file_2],
+        outputs=[use_concpet_from_file_2]
+    )
+    concept_file_3.delete(
+        fn=lambda x: False,
+        inputs=[concept_file_3],
+        outputs=[use_concpet_from_file_3]
+    )
 
     submit_btn.click(
         fn=randomize_seed_fn,
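The wiring above is the usual Gradio pattern of feeding event outputs into `gr.State`: uploading a file computes the embeddings once and flips the use-file flag, and deleting the file resets the flag so the dropdown concept is used again. A stripped-down, self-contained sketch of the same pattern; the component names and the dummy handler are illustrative, not the app's code:

```python
import gradio as gr

def on_upload(file_path):
    # stand-in for get_text_embeddings: return (payload, use-file flag)
    return file_path, True

with gr.Blocks() as demo:
    payload_state = gr.State()
    use_file_state = gr.State(False)
    file_input = gr.File(label="concept variations", file_types=["text"])
    file_input.upload(fn=on_upload, inputs=[file_input],
                      outputs=[payload_state, use_file_state])
    file_input.delete(fn=lambda _: False, inputs=[file_input],
                      outputs=[use_file_state])

if __name__ == "__main__":
    demo.launch()
```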
@@ -296,7 +370,13 @@ following the algorithm proposed in [*IP-Composer: Semantic Composition of Visua
             concept_image2, concept_name2,
             concept_image3, concept_name3,
             rank1, rank2, rank3,
-            prompt, scale, seed, num_inference_steps
+            prompt, scale, seed, num_inference_steps,
+            concpet_from_file_1,
+            concpet_from_file_2,
+            concpet_from_file_3,
+            use_concpet_from_file_1,
+            use_concpet_from_file_2,
+            use_concpet_from_file_3
         ],
         outputs=[output_image]
     )
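Gradio passes the `inputs` list to the callback positionally, so the six `State` components appended here need to line up with the six new trailing parameters this diff adds to `process_images` (with `process_and_display` assumed to forward them unchanged). A stub with the assumed parameter order, for illustration only:

```python
# Illustrative stub only: the positional order mirrors the click event's `inputs`
# list above; the leading parameters are assumptions based on the visible signature
# of process_and_display and the rest of the inputs list.
def process_images_stub(base_image,
                        concept_image1, concept_name1,
                        concept_image2, concept_name2,
                        concept_image3, concept_name3,
                        rank1, rank2, rank3,
                        prompt, scale, seed, num_inference_steps,
                        concpet_from_file_1=None,
                        concpet_from_file_2=None,
                        concpet_from_file_3=None,
                        use_concpet_from_file_1=False,
                        use_concpet_from_file_2=False,
                        use_concpet_from_file_3=False):
    """Each appended State value arrives as one of the trailing arguments."""
    return None
```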