Spaces: Running on Zero
Update app.py
Browse files
app.py CHANGED
@@ -177,6 +177,7 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
 @spaces.GPU(duration=30)
 def generate_30(
     prompt: str,
+    prompt2: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     style_selection: str = "",
@@ -191,10 +192,30 @@ def generate_30(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
+
+    text_inputs1 = tokenizer(
+        prompt,
+        padding="max_length",
+        max_length=77,
+        truncation=True,
+        return_tensors="pt",
+    )
+    text_inputs2 = tokenizer(
+        prompt2,
+        padding="max_length",
+        max_length=77,
+        truncation=True,
+        return_tensors="pt",
+    )
+    prompt_embedsa = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embedsb = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
+
     options = {
-        "prompt": prompt,
-        "negative_prompt": negative_prompt,
-        "negative_prompt_2": neg_prompt_2,
+        #"prompt": prompt,
+        "prompt_embeds": prompt_embeds,
+        "negative_prompt": negative_prompt,
+        "negative_prompt_2": neg_prompt_2,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
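Review note on the new encoding block (the same block is copied into all three generate_* functions): as committed, both text_encoder calls read from text_input_ids, a name this diff never defines, so the block would raise a NameError at runtime; even if it were defined, both calls would encode the same ids and prompt2 would never be used. torch.cat is also given the encoder's output objects rather than tensors, and .mean(dim=-1) would average away the feature dimension instead of blending the two prompts. A minimal corrected sketch of what the block appears to intend, assuming tokenizer and text_encoder are the CLIP components this Space already loads (merge_prompt_embeds is a hypothetical helper, not code from the commit):

    import torch

    def merge_prompt_embeds(prompt: str, prompt2: str, tokenizer, text_encoder, device):
        embeds = []
        for text in (prompt, prompt2):
            inputs = tokenizer(
                text,
                padding="max_length",
                max_length=77,          # CLIP's token limit
                truncation=True,
                return_tensors="pt",
            )
            # Use each prompt's own ids (the commit reuses an undefined
            # `text_input_ids` for both calls).
            output = text_encoder(inputs.input_ids.to(device), output_hidden_states=True)
            # diffusers' SDXL pipelines take the penultimate hidden state.
            embeds.append(output.hidden_states[-2])
        # Stack on a new dim and average over it, preserving the
        # (batch, 77, hidden) shape the pipeline expects; `.mean(dim=-1)`
        # as committed would collapse the hidden dimension instead.
        return torch.stack(embeds).mean(dim=0)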
@@ -219,6 +240,7 @@ def generate_30(
 @spaces.GPU(duration=60)
 def generate_60(
     prompt: str,
+    prompt2: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     style_selection: str = "",
@@ -233,10 +255,30 @@ def generate_60(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
+
+    text_inputs1 = tokenizer(
+        prompt,
+        padding="max_length",
+        max_length=77,
+        truncation=True,
+        return_tensors="pt",
+    )
+    text_inputs2 = tokenizer(
+        prompt2,
+        padding="max_length",
+        max_length=77,
+        truncation=True,
+        return_tensors="pt",
+    )
+    prompt_embedsa = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embedsb = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
+
     options = {
-        "prompt": prompt,
-        "negative_prompt": negative_prompt,
-        "negative_prompt_2": neg_prompt_2,
+        #"prompt": prompt,
+        "prompt_embeds": prompt_embeds,
+        "negative_prompt": negative_prompt,
+        "negative_prompt_2": neg_prompt_2,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
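Since generate_30, generate_60, and generate_90 now carry three identical copies of this tokenize-and-merge block, a follow-up commit could share one body and apply the duration decorator three times. A sketch (names hypothetical, not part of this commit):

    import spaces

    def _generate(prompt: str, prompt2: str, *args, **kwargs):
        ...  # shared implementation of the current generate_* body

    # spaces.GPU(duration=...) is already used as a decorator in app.py,
    # so it can wrap one shared function at three durations.
    generate_30 = spaces.GPU(duration=30)(_generate)
    generate_60 = spaces.GPU(duration=60)(_generate)
    generate_90 = spaces.GPU(duration=90)(_generate)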
@@ -261,6 +303,7 @@ def generate_60(
 @spaces.GPU(duration=90)
 def generate_90(
     prompt: str,
+    prompt2: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     style_selection: str = "",
@@ -275,10 +318,30 @@ def generate_90(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
+
+    text_inputs1 = tokenizer(
+        prompt,
+        padding="max_length",
+        max_length=77,
+        truncation=True,
+        return_tensors="pt",
+    )
+    text_inputs2 = tokenizer(
+        prompt2,
+        padding="max_length",
+        max_length=77,
+        truncation=True,
+        return_tensors="pt",
+    )
+    prompt_embedsa = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embedsb = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
+
     options = {
-        "prompt": prompt,
-        "negative_prompt": negative_prompt,
-        "negative_prompt_2": neg_prompt_2,
+        #"prompt": prompt,
+        "prompt_embeds": prompt_embeds,
+        "negative_prompt": negative_prompt,
+        "negative_prompt_2": neg_prompt_2,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
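A version-dependent caveat: the two text encoders and negative_prompt_2 here point to an SDXL-family pipeline, and recent diffusers releases reject prompt_embeds unless a matching pooled_prompt_embeds is passed alongside it. A hedged sketch (not the committed approach) that lets the pipeline build all four embedding tensors per prompt and then blends them pairwise:

    # Assumes `pipe` is a StableDiffusionXLPipeline and that prompt,
    # prompt2, negative_prompt, and device exist as in the diff above.
    e1, ne1, p1, np1 = pipe.encode_prompt(
        prompt=prompt, negative_prompt=negative_prompt,
        device=device, do_classifier_free_guidance=True,
    )
    e2, ne2, p2, np2 = pipe.encode_prompt(
        prompt=prompt2, negative_prompt=negative_prompt,
        device=device, do_classifier_free_guidance=True,
    )
    options = {
        "prompt_embeds": (e1 + e2) / 2,
        "negative_prompt_embeds": (ne1 + ne2) / 2,
        "pooled_prompt_embeds": (p1 + p2) / 2,
        "negative_pooled_prompt_embeds": (np1 + np2) / 2,
        # width, height, guidance_scale, etc. as in the existing dict
    }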
@@ -338,6 +401,13 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                 placeholder="Enter your prompt",
                 container=False,
             )
+            prompt2 = gr.Text(
+                label="Prompt 2",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt",
+                container=False,
+            )
             run_button_30 = gr.Button("Run 30 Seconds", scale=0)
             run_button_60 = gr.Button("Run 60 Seconds", scale=0)
             run_button_90 = gr.Button("Run 90 Seconds", scale=0)
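Small UI nit: with show_label=False the "Prompt 2" label never renders, and both boxes share the placeholder "Enter your prompt". A distinct placeholder (a suggestion, not part of the commit) would be the only visible cue that the second box differs:

    prompt2 = gr.Text(
        label="Prompt 2",
        show_label=False,
        max_lines=1,
        placeholder="Enter your second prompt",  # distinguishes the two boxes
        container=False,
    )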
@@ -416,6 +486,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         fn=generate_30,
         inputs=[
             prompt,
+            prompt2,
             negative_prompt,
             use_negative_prompt,
             style_selection,
@@ -435,6 +506,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         fn=generate_60,
         inputs=[
             prompt,
+            prompt2,
             negative_prompt,
             use_negative_prompt,
             style_selection,
@@ -454,6 +526,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         fn=generate_90,
         inputs=[
             prompt,
+            prompt2,
             negative_prompt,
             use_negative_prompt,
             style_selection,
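Gradio maps inputs to function parameters positionally, so these three hunks must insert prompt2 in the same slot it occupies in each signature (second). For reference, a minimal sketch of one binding, assuming a standard click wiring and a hypothetical result output component (app.py's actual trigger and outputs are not shown in this diff):

    run_button_30.click(
        fn=generate_30,
        inputs=[
            prompt,
            prompt2,   # second, matching generate_30(prompt, prompt2, ...)
            negative_prompt,
            use_negative_prompt,
            style_selection,
            # ...remaining controls in signature order
        ],
        outputs=[result],  # hypothetical output component
    )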