Update app.py
app.py CHANGED
@@ -134,7 +134,7 @@ def generate_t2v_video(self, prompt, lora_alpha, num_inference_steps,
 
 @spaces.GPU(duration=200)
 def generate_video(prompt: str):
-
+
 
     # Load model using patched manager
 
@@ -142,13 +142,13 @@ def generate_video(prompt: str):
     model_manager = ModelManager(device="cuda")
     base_dir = "model_zoo/PusaV1/Wan2.1-T2V-14B"
 
-    model_files = sorted([os.path.join(
+    model_files = sorted([os.path.join(base_dir, f) for f in os.listdir(base_dir) if f.endswith('.safetensors')])
 
     model_manager.load_models(
         [
             model_files,
-            os.path.join(
-            os.path.join(
+            os.path.join(base_dir, "models_t5_umt5-xxl-enc-bf16.pth"),
+            os.path.join(base_dir, "Wan2.1_VAE.pth"),
         ],
         torch_dtype=torch.bfloat16,
     )
@@ -202,4 +202,8 @@ with gr.Blocks() as demo:
 
     generate_btn.click(fn=generate_video, inputs=prompt_input, outputs=video_output)
 
-
+
+
+if __name__ == "__main__":
+    ensure_model_downloaded()
+    demo.launch(share=True, show_error=True)