Remove test code
app.py CHANGED
@@ -16,16 +16,6 @@ from hyvideo.constants import NEGATIVE_PROMPT
 
 from huggingface_hub import snapshot_download
 
-if torch.cuda.device_count() == 0:
-    class Arguments:
-        def __init__(self, input_dir, output_dir):
-            self.input_dir = input_dir
-            self.output_dir = output_dir
-
-    # Create the object
-    args = Arguments("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
-    preprocess_text_encoder_tokenizer(args)
-
 if torch.cuda.device_count() > 0:
     snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
     snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)