# llama-omni / cog.yaml
# (Provenance, converted to comments so the file parses as YAML: captured from a
# Hugging Face file page — uploader marcosremar2, commit 34b8b49, commit message
# "dfdfdf", 894 bytes; "raw / history / blame" was page chrome, not file content.)
# Cog build configuration: GPU image with pinned Python dependencies, plus
# post-install steps that fetch the fairseq unit-HiFi-GAN vocoder assets.
build:
  gpu: true
  python_version: "3.10"
  python_packages:
    - "torch==2.0.1"
    - "transformers==4.34.0"
    - "accelerate==0.21.0"
    - "gradio==3.50.2"
    - "fastapi==0.104.0"
    - "uvicorn==0.23.2"
    - "pydantic==2.3.0"
    - "openai-whisper==20231117"
    - "numpy==1.24.0"
    - "tqdm==4.66.1"
    # NOTE(review): flash-attn compiles against torch at pip-install time; if
    # this package fails to build in one batch with torch, it may need to move
    # into `run:` (after torch is installed) — confirm the image builds.
    - "flash-attn==2.3.0"
    - "requests==2.31.0"
  system_packages:
    - "wget"         # used by the vocoder download commands in `run:` below
    - "ffmpeg"       # audio decode/encode for the speech pipeline
    - "libsndfile1"  # native backend for sound-file I/O
  run:
    # NOTE(review): editable install from an unpinned git HEAD — consider
    # pinning a commit for reproducible builds.
    - "pip install -e git+https://github.com/pytorch/fairseq.git#egg=fairseq"
    # Download the pretrained unit-based HiFi-GAN vocoder checkpoint + config
    # into ./vocoder (paths presumably read by predict.py — verify).
    - "mkdir -p vocoder"
    - "wget https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/vocoder/code_hifigan/mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj/g_00500000 -P vocoder/"
    - "wget https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/vocoder/code_hifigan/mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj/config.json -P vocoder/"

# Cog entry point: the Predictor class defined in predict.py.
predict: "predict.py:Predictor"