Update app.py
app.py CHANGED
@@ -273,6 +273,7 @@ def generate_30(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True)  # Add progress as a keyword argument
 ):
+    torch.backends.cudnn.benchmark = False
     torch.cuda.empty_cache()
     gc.collect()
     global models
@@ -329,6 +330,7 @@ def generate_60(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True)  # Add progress as a keyword argument
 ):
+    torch.backends.cudnn.benchmark = True
     torch.cuda.empty_cache()
     gc.collect()
     global models
@@ -385,6 +387,7 @@ def generate_90(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True)  # Add progress as a keyword argument
 ):
+    torch.backends.cudnn.benchmark = True
     torch.cuda.empty_cache()
     gc.collect()
     global models
@@ -658,7 +661,6 @@ description = "Text Generator Application by ecarbo"
 
 if __name__ == "__main__":
     demo_interface = demo.queue(max_size=50)  # Remove .launch() here
-
     text_gen_interface = gr.Interface(
         fn=text_generation,
         inputs=[
@@ -669,6 +671,5 @@ if __name__ == "__main__":
         title=title,
         description=description,
     )
-
     combined_interface = gr.TabbedInterface([demo_interface, text_gen_interface], ["Image Generation", "Text Generation"])
     combined_interface.launch(show_api=False)
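For context on the pattern these hunks introduce: each generate_* function now sets torch.backends.cudnn.benchmark at the top (False for generate_30, True for generate_60 and generate_90) and then frees cached GPU memory before doing any work, while the gr.Progress(track_tqdm=True) keyword argument lets Gradio surface any tqdm loop inside the function as a progress bar. The sketch below is a minimal, self-contained illustration of that pattern, not the app's actual code: the function name generate_images, the prompt argument, the slider inputs, and the placeholder output are invented for the example; only the cuDNN flag, the empty_cache()/gc.collect() cleanup, the progress keyword, and the queue(max_size=50).launch(show_api=False) call mirror the diff.

import gc

import gradio as gr
import torch


def generate_images(prompt: str, num_images: int = 1,
                    progress=gr.Progress(track_tqdm=True)):  # Gradio injects a tracker here
    # cuDNN autotuning: True benchmarks and caches the fastest kernels for the
    # current input shapes; False keeps kernel selection fixed (deterministic timing).
    torch.backends.cudnn.benchmark = False

    # Hand cached, unused GPU memory back to the driver and drop dead Python
    # objects before the memory-hungry generation step.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()

    # Placeholder for the real diffusion call; a tqdm loop here would be
    # mirrored in the Gradio progress bar thanks to track_tqdm=True.
    return [f"image {i + 1} for: {prompt}" for i in range(int(num_images))]


if __name__ == "__main__":
    demo = gr.Interface(
        fn=generate_images,
        inputs=[gr.Textbox(label="Prompt"),
                gr.Slider(1, 4, step=1, value=1, label="Number of images")],
        outputs=gr.JSON(label="Results"),
    )
    demo.queue(max_size=50).launch(show_api=False)

In app.py itself, the queued image demo is not launched directly; as the last two hunks show, it is combined with a text-generation tab via gr.TabbedInterface and then launched with show_api=False.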