app.py
CHANGED
@@ -21,7 +21,7 @@ from ip_adapter import CSGO
 from transformers import BlipProcessor, BlipForConditionalGeneration
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-
+dtype = torch.float16 if str(device).__contains__("cuda") else torch.float32
 import os
 os.system("git lfs install")
 os.system("git clone https://huggingface.co/h94/IP-Adapter")
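
Note: the added dtype line selects half precision only when CUDA is available, since fp16 kernels are poorly supported on CPU. str(device).__contains__("cuda") works, but the idiomatic spelling uses the in operator; a minimal standalone sketch of the same selection:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
# fp16 on GPU for speed and memory; fp32 on CPU, where fp16 support is spotty
dtype = torch.float16 if "cuda" in device else torch.float32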
@@ -101,20 +101,28 @@ def get_example():
             "there is a small house with a sheep statue on top of it",
             0.6,
             1.0,
+            7.0,
+            42
         ],
         [
             None,
             './assets/img_1.png',
             "Text-Driven Style Synthesis",
             "a cat",
-            0.01,
+            0.01,
+            1.0,
+            7.0,
+            42
         ],
         [
             None,
             './assets/img_2.png',
             "Text-Driven Style Synthesis",
             "a building",
-            0.01,
+            0.01,
+            1.0,
+            7.0,
+            42,
         ],
         [
             "./assets/img_0.png",
@@ -122,23 +130,25 @@ def get_example():
             "Text Edit-Driven Style Synthesis",
             "there is a small house",
             0.4,
-            1.0
+            1.0,
+            7.0,
+            42,
         ],
     ]
     return case
 
-
-def run_for_examples(content_image_pil,style_image_pil,target, prompt, scale_c, scale_s):
+
+def run_for_examples(content_image_pil,style_image_pil,target, prompt, scale_c, scale_s,guidance_scale,seed):
     return create_image(
         content_image_pil=content_image_pil,
         style_image_pil=style_image_pil,
         prompt=prompt,
         scale_c=scale_c,
         scale_s=scale_s,
-        guidance_scale=
+        guidance_scale=guidance_scale,
         num_samples=2,
         num_inference_steps=50,
-        seed=
+        seed=seed,
         target=target,
     )
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
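
Note: guidance_scale and seed are now threaded through run_for_examples into create_image rather than being fixed inside the call, so each example row carries its own guidance strength (7.0) and seed (42). Presumably create_image consumes the seed as a torch.Generator, along the lines of this sketch (pipe is a stand-in for the app's loaded pipeline, not its actual variable name):

import torch

def sketch_generate(pipe, prompt, guidance_scale=7.0, seed=42, num_samples=2):
    # A fixed seed (42 in the example rows) makes outputs reproducible
    generator = torch.Generator("cpu").manual_seed(seed)
    return pipe(prompt,
                guidance_scale=guidance_scale,   # classifier-free guidance strength
                num_images_per_prompt=num_samples,
                generator=generator).images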
@@ -156,7 +166,7 @@ def image_grid(imgs, rows, cols):
     for i, img in enumerate(imgs):
         grid.paste(img, box=(i % cols * w, i // cols * h))
     return grid
-@spaces.GPU(duration=
+@spaces.GPU(duration=200)
 def create_image(content_image_pil,
                  style_image_pil,
                  prompt,
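
Note: on a ZeroGPU Space, @spaces.GPU reserves a GPU only for the duration of the decorated call; duration=200 raises the per-call budget to 200 seconds, enough for 50 inference steps at two samples. The pattern, sketched minimally (do_heavy_work is a hypothetical function name):

import spaces

@spaces.GPU(duration=200)  # request up to 200 s of GPU time per call
def do_heavy_work(*args):
    # On ZeroGPU, CUDA initialization and inference must happen
    # inside the decorated function, not at module import time.
    ...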
@@ -350,10 +360,10 @@ with block:
 
     gr.Examples(
         examples=get_example(),
-        inputs=[content_image_pil,style_image_pil,target, prompt, scale_c, scale_s],
+        inputs=[content_image_pil,style_image_pil,target, prompt, scale_c, scale_s,guidance_scale,seed],
         fn=run_for_examples,
         outputs=[generated_image],
-        cache_examples=
+        cache_examples=False,
     )
 
     gr.Markdown(article)
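
Note: cache_examples=False stops Gradio from pre-computing every example at startup, which would otherwise invoke the GPU-decorated function once per row before the UI loads. The example rows, the inputs list, and run_for_examples must agree positionally; a sketch of the wiring with a single toy row, assuming the components defined earlier in app.py:

import gradio as gr

gr.Examples(
    examples=[[None, './assets/img_1.png', "Text-Driven Style Synthesis",
               "a cat", 0.01, 1.0, 7.0, 42]],   # same order as inputs below
    inputs=[content_image_pil, style_image_pil, target, prompt,
            scale_c, scale_s, guidance_scale, seed],
    fn=run_for_examples,           # called with one row unpacked positionally
    outputs=[generated_image],
    cache_examples=False,          # skip running the GPU fn per row at startup
)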