Spaces: Build error
Update app.py
app.py CHANGED
@@ -50,7 +50,7 @@ def install_dependencies(enable_optimization=False):
             env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
             shell=True,
         )
-
+
     # install apex for fused layernorm
     if not _is_package_available("apex"):
         subprocess.run(
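The apex step in this hunk is gated on `_is_package_available`, whose definition sits outside the diff. A rough sketch of what such a check usually looks like follows; the helper body and the placeholder install command are assumptions, not the Space's actual code.

```python
import importlib.util
import subprocess


def _is_package_available(name: str) -> bool:
    # True if the package can be imported in the current environment.
    return importlib.util.find_spec(name) is not None


if not _is_package_available("apex"):
    # Placeholder command; the real app runs its own install with extra env/flags.
    subprocess.run("echo 'apex missing, would install it here'", shell=True)
```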
@@ -100,7 +100,8 @@ def build_models(model_type, config, enable_optimization=False):
     # handle model download logic in HuggingFace Space
     from opensora.models.stdit.stdit3 import STDiT3
 
-
+    model_kwargs = {k: v for k, v in config.model.items() if k not in ("type", "from_pretrained", "force_huggingface")}
+    stdit = STDiT3.from_pretrained(HF_STDIT_MAP[model_type], **model_kwargs)
     stdit = stdit.cuda()
 
     # build scheduler
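The two added lines load STDiT3 from the Hub and forward every `config.model` entry except the keys that only drive Open-Sora's local builder. A self-contained sketch of the same filtering pattern, using a made-up config dict (all keys besides the three excluded ones are illustrative):

```python
# Toy stand-in for config.model; the real values come from Open-Sora's config files.
model_cfg = {
    "type": "STDiT3-XL/2",
    "from_pretrained": None,
    "force_huggingface": True,
    "qk_norm": True,
    "enable_flash_attn": False,
}

# Keep only the kwargs that the pretrained constructor should receive.
model_kwargs = {
    k: v
    for k, v in model_cfg.items()
    if k not in ("type", "from_pretrained", "force_huggingface")
}

assert model_kwargs == {"qk_norm": True, "enable_flash_attn": False}
```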
@@ -183,7 +184,6 @@ from opensora.utils.inference_utils import (
     prepare_multi_resolution_info,
     refine_prompts_by_openai,
     split_prompt,
-    has_openai_key
 )
 from opensora.utils.misc import to_torch_dtype
 
@@ -513,13 +513,15 @@ def main():
         with gr.Row():
             with gr.Column():
                 prompt_text = gr.Textbox(label="Prompt", placeholder="Describe your video here", lines=4)
-                refine_prompt = gr.Checkbox(
+                refine_prompt = gr.Checkbox(
+                    value=has_openai_key(), label="Refine prompt with GPT4o", interactive=has_openai_key()
+                )
                 random_prompt_btn = gr.Button("Random Prompt By GPT4o", interactive=has_openai_key())
 
                 gr.Markdown("## Basic Settings")
                 resolution = gr.Radio(
                     choices=["144p", "240p", "360p", "480p", "720p"],
-                    value="
+                    value="480p",
                     label="Resolution",
                 )
                 aspect_ratio = gr.Radio(
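The rebuilt checkbox defaults on and stays clickable only when `has_openai_key()` is true; otherwise it renders greyed out. A minimal Gradio sketch of that gating pattern follows; the env-var based `has_openai_key` is an assumption for the sketch, since the real helper lives in `opensora.utils.inference_utils` and is not shown in this diff.

```python
import os

import gradio as gr


def has_openai_key() -> bool:
    # Assumption for this sketch: "has a key" just means the env var is set.
    return bool(os.environ.get("OPENAI_API_KEY"))


with gr.Blocks() as demo:
    # Ticked and enabled only when a key is available, mirroring the hunk above.
    refine_prompt = gr.Checkbox(
        value=has_openai_key(),
        label="Refine prompt with GPT4o",
        interactive=has_openai_key(),
    )
    random_prompt_btn = gr.Button("Random Prompt By GPT4o", interactive=has_openai_key())
```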
@@ -646,6 +648,7 @@ def main():
         random_prompt_btn.click(fn=generate_random_prompt, outputs=prompt_text)
 
     # launch
+    demo.queue(max_size=5, default_concurrency_limit=1)
     demo.launch(server_port=args.port, server_name=args.host, share=args.share, max_threads=1)
 
 
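The last hunk turns on Gradio's request queue before launching, so at most five requests wait and only one event runs at a time, which suits a single-GPU Space. A minimal standalone example of the same setup; the slow function and component labels are stand-ins for the real video-generation pipeline.

```python
import time

import gradio as gr


def slow_generate(prompt: str) -> str:
    # Stand-in for the GPU-heavy sampling step.
    time.sleep(2)
    return f"done: {prompt}"


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    gr.Button("Generate").click(fn=slow_generate, inputs=prompt, outputs=result)

# Queue at most 5 pending requests and run one event at a time.
demo.queue(max_size=5, default_concurrency_limit=1)

if __name__ == "__main__":
    demo.launch()
```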