# FantasyTalking deployment script
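"""Automated deployment helper for FantasyTalking.

The script walks through four steps: check GPU availability, install the
Python dependencies from requirements.txt, download any missing model
weights from Hugging Face (Wan2.1-I2V-14B-720P, wav2vec2-base-960h, and the
FantasyTalking checkpoint), and finally launch app.py.

Usage sketch (the file name below is an assumption; adjust it to wherever
this script is saved in the repository root):

    python deploy.py
"""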

import os
import subprocess
import sys
from pathlib import Path


def check_gpu():
    """检查GPU可用性"""
    try:
        import torch
        if torch.cuda.is_available():
            gpu_count = torch.cuda.device_count()
            gpu_name = torch.cuda.get_device_name(0) if gpu_count > 0 else "Unknown"
            gpu_memory = torch.cuda.get_device_properties(0).total_memory // (1024**3) if gpu_count > 0 else 0
            
            print(f"✅ GPU可用: {gpu_name}")
            print(f"✅ GPU内存: {gpu_memory}GB")
            
            if gpu_memory < 5:
                print("⚠️  警告: GPU内存可能不足,建议至少5GB VRAM")
            
            return True
        else:
            print("❌ 未检测到可用的GPU")
            return False
    except ImportError:
        print("❌ PyTorch未安装")
        return False


def install_dependencies():
    """安装依赖"""
    print("📦 安装依赖包...")
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
    print("✅ 依赖安装完成")


def download_models():
    """下载模型(需要huggingface-cli)"""
    print("📥 开始下载模型...")
    
    models_dir = Path("./models")
    models_dir.mkdir(exist_ok=True)
    
    # Check that huggingface-cli is available
    try:
        subprocess.check_call(["huggingface-cli", "--help"], stdout=subprocess.DEVNULL)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("安装huggingface-hub[cli]...")
        subprocess.check_call([sys.executable, "-m", "pip", "install", "huggingface_hub[cli]"])
    
    # Download the base models
    models_to_download = [
        ("Wan-AI/Wan2.1-I2V-14B-720P", "./models/Wan2.1-I2V-14B-720P"),
        ("facebook/wav2vec2-base-960h", "./models/wav2vec2-base-960h"),
    ]
    
    for model_id, local_dir in models_to_download:
        print(f"下载 {model_id}...")
        subprocess.check_call([
            "huggingface-cli", "download", model_id, 
            "--local-dir", local_dir
        ])
    
    # Download the FantasyTalking weights
    print("Downloading the FantasyTalking weights...")
    subprocess.check_call([
        "huggingface-cli", "download", "acvlab/FantasyTalking", 
        "fantasytalking_model.ckpt", "--local-dir", "./models"
    ])
    
    print("✅ 模型下载完成")


def check_model_files():
    """检查模型文件"""
    required_files = [
        "./models/Wan2.1-I2V-14B-720P",
        "./models/wav2vec2-base-960h", 
        "./models/fantasytalking_model.ckpt"
    ]
    
    missing_files = []
    for file_path in required_files:
        if not os.path.exists(file_path):
            missing_files.append(file_path)
    
    if missing_files:
        print("❌ 缺少以下模型文件:")
        for file in missing_files:
            print(f"   - {file}")
        return False
    else:
        print("✅ 所有模型文件已就绪")
        return True


def start_app():
    """启动应用"""
    print("🚀 启动FantasyTalking应用...")
    subprocess.check_call([sys.executable, "app.py"])


def main():
    """主函数"""
    print("🎬 FantasyTalking 自动部署脚本")
    print("=" * 50)
    
    # Check GPU availability
    if not check_gpu():
        print("⚠️  继续使用CPU模式(速度会很慢)")
    
    # Install dependencies
    install_dependencies()
    
    # Check model files
    if not check_model_files():
        print("📥 需要下载模型文件...")
        download_models()
    
    print("✅ 部署完成!")
    print("\n启动应用...")
    start_app()


if __name__ == "__main__":
    main()