Update gradio_app.py
gradio_app.py  CHANGED  (+14 -5)

@@ -65,11 +65,11 @@ video_pipe = LatentVideoDiffusionPipeline.from_pretrained(
     'lllyasviel/paints_undo_multi_frame',
     fp16=True
 )
-
-memory_management.unload_all_models([
-    video_pipe.unet, video_pipe.vae, video_pipe.text_encoder, video_pipe.image_projection, video_pipe.image_encoder,
-    unet, vae, text_encoder
-])
+if is_gpu_associated:
+    memory_management.unload_all_models([
+        video_pipe.unet, video_pipe.vae, video_pipe.text_encoder, video_pipe.image_projection, video_pipe.image_encoder,
+        unet, vae, text_encoder
+    ])
 
 k_sampler = KDiffusionSampler(
     unet=unet,
@@ -132,6 +132,9 @@ def interrogator_process(x):
     if is_shared_ui:
         raise gr.Error("This Space only works in duplicated instances")
 
+    if not is_gpu_associated:
+        raise gr.Error("Please associate a T4 or A10G GPU for this Space")
+
     return wd14tagger.default_interrogator(x)
 
 
@@ -142,6 +145,9 @@ def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed,
     if is_shared_ui:
         raise gr.Error("This Space only works in duplicated instances")
 
+    if not is_gpu_associated:
+        raise gr.Error("Please associate a T4 or A10G GPU for this Space")
+
     rng = torch.Generator(device=memory_management.gpu).manual_seed(int(seed))
 
     memory_management.load_models_to_gpu(vae)
@@ -240,6 +246,9 @@ def process_video(keyframes, prompt, steps, cfg, fps, seed, progress=gr.Progress
     if is_shared_ui:
         raise gr.Error("This Space only works in duplicated instances")
 
+    if not is_gpu_associated:
+        raise gr.Error("Please associate a T4 or A10G GPU for this Space")
+
     result_frames = []
     cropped_images = []
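The added checks rely on two module-level flags, is_shared_ui and is_gpu_associated, whose definitions are not part of this diff. Below is a minimal sketch of how such flags are commonly derived in a duplicable Hugging Face Space; the SPACE_ID comparison string is a hypothetical placeholder and torch.cuda.is_available() is only an assumed proxy for "a GPU is attached", neither taken from this commit.

import os
import torch

# Sketch only (not from this commit): detect whether the app is running
# inside the original shared Space, where heavy jobs should be refused
# and users asked to duplicate the Space instead.
is_shared_ui = "original-author/Paints-UNDO" in os.environ.get("SPACE_ID", "")

# Sketch only (not from this commit): treat a visible CUDA device as
# "a GPU is associated with this (duplicated) Space".
is_gpu_associated = torch.cuda.is_available()

With flags like these, each entry point (interrogator_process, process, process_video) can fail fast with a gr.Error before any model is moved to the GPU, which is what the added "if not is_gpu_associated:" blocks do, and the model-unloading call at import time is skipped entirely on CPU-only instances.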