Spaces: Build error
Commit · 1cd5253
Parent(s): e68727e dfdffd
Browse files
- README.md +4 -5
- app_gradio_spaces.py +8 -34
- cog.yaml +2 -2
- pyproject.toml +3 -3
- requirements.txt +3 -3
README.md
CHANGED
@@ -42,14 +42,13 @@ This is a Gradio deployment of [LLaMA-Omni](https://github.com/ictnlp/LLaMA-Omni
 
 3. Install fairseq:
 ```bash
-
-cd fairseq
-pip install -e . --no-build-isolation
+pip install git+https://github.com/pytorch/fairseq.git
 ```
 
-4. Install
+4. Install optional dependencies (if not on Mac M1/M2):
 ```bash
-
+# Only run this if not on Mac with Apple Silicon
+pip install flash-attn
 ```
 
 ## 🚀 Deployment
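The new step 4 leaves the platform check to the reader. A minimal sketch of the same guard done programmatically, using only the standard library (the exact check is an assumption; flash-attn is skipped here because it does not build on Apple Silicon Macs):

```python
import platform
import subprocess
import sys

# Mirror the README's step 4: only attempt the flash-attn install
# when we are NOT on an Apple Silicon Mac (Darwin + arm64).
if platform.system() != "Darwin" or platform.machine() != "arm64":
    subprocess.check_call([sys.executable, "-m", "pip", "install", "flash-attn"])
```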
app_gradio_spaces.py
CHANGED
@@ -51,7 +51,7 @@ def setup_environment():
 
     return "✅ Environment setup complete!"
 
-def start_services(controller_output, model_worker_output, web_server_output):
+def start_services():
     """Start the controller, model worker, and web server."""
     # Start the controller
     controller_process = run_background_process(
@@ -59,14 +59,6 @@ def start_services(controller_output, model_worker_output, web_server_output):
         "Controller"
     )
 
-    # Start a thread to read controller output
-    controller_thread = threading.Thread(
-        target=read_process_output,
-        args=(controller_process, controller_output, "Controller"),
-        daemon=True
-    )
-    controller_thread.start()
-
     # Wait for controller to start
     time.sleep(5)
 
@@ -76,14 +68,6 @@ def start_services(controller_output, model_worker_output, web_server_output):
         "Model Worker"
     )
 
-    # Start a thread to read model worker output
-    model_worker_thread = threading.Thread(
-        target=read_process_output,
-        args=(model_worker_process, model_worker_output, "Model Worker"),
-        daemon=True
-    )
-    model_worker_thread.start()
-
     # Wait for model worker to start
     time.sleep(10)
 
@@ -93,18 +77,10 @@ def start_services(controller_output, model_worker_output, web_server_output):
         "Web Server"
     )
 
-    # Start a thread to read web server output
-    web_server_thread = threading.Thread(
-        target=read_process_output,
-        args=(web_server_process, web_server_output, "Web Server"),
-        daemon=True
-    )
-    web_server_thread.start()
-
     # Wait for web server to start
     time.sleep(5)
 
-    return "✅ All services started! Click
+    return "✅ All services started successfully! Click 'Open Interface' to access the application."
 
 def build_ui():
     """Build the Gradio UI."""
@@ -120,19 +96,17 @@ def build_ui():
         start_btn = gr.Button("Start LLaMA-Omni Services")
         status_output = gr.Textbox(label="Status", value="Click 'Start LLaMA-Omni Services' to begin.")
 
-
-
-
-        web_server_output = gr.Markdown("Web Server not started")
+        controller_output = gr.Markdown(value="Controller not started")
+        model_worker_output = gr.Markdown(value="Model Worker not started")
+        web_server_output = gr.Markdown(value="Web Server not started")
 
         start_btn.click(
             start_services,
-
-            outputs=[status_output, controller_output, model_worker_output, web_server_output]
+            outputs=status_output
         )
 
         interface_btn = gr.Button("Open Interface")
-        interface_btn.click(lambda: gr.
+        interface_btn.click(lambda: gr.Redirect("http://localhost:8001"), None, None)
 
         with gr.Tab("About"):
             gr.Markdown("""
@@ -157,4 +131,4 @@ def build_ui():
 
 if __name__ == "__main__":
     demo = build_ui()
-    demo.launch(server_port=7860)
+    demo.launch(server_name="0.0.0.0", server_port=7860)
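`run_background_process` is called at each step above but its definition is not part of these hunks. A plausible stand-in matching the call sites (a hypothetical sketch, assuming the helper just launches a command under a display name and returns the process handle):

```python
import subprocess

def run_background_process(cmd, name):
    # Hypothetical helper matching the call sites above: launch `cmd`
    # without blocking the Gradio thread and return the Popen handle
    # so start_services() can keep a reference to it.
    print(f"[{name}] starting: {' '.join(cmd)}")
    return subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
```

With the output-reader threads removed, the commit relies on fixed `time.sleep` delays instead of log inspection to decide when each service is ready; the subprocess output is simply buffered in the pipe.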
cog.yaml
CHANGED
@@ -3,6 +3,7 @@ build:
   python_version: "3.10"
   python_packages:
     - "torch==2.0.1"
+    - "numpy==1.24.0"
     - "transformers==4.34.0"
     - "accelerate==0.21.0"
     - "gradio==3.50.2"
@@ -10,9 +11,7 @@ build:
     - "uvicorn==0.23.2"
     - "pydantic==2.3.0"
    - "openai-whisper==20231117"
-    - "numpy==1.24.0"
     - "tqdm==4.66.1"
-    - "flash-attn==2.3.0"
     - "requests==2.31.0"
   system_packages:
     - "wget"
@@ -20,6 +19,7 @@ build:
     - "libsndfile1"
   run:
     - "pip install -e git+https://github.com/pytorch/fairseq.git#egg=fairseq"
+    - "if [ $(uname -m) != 'arm64' ] || [ $(uname -s) != 'Darwin' ]; then pip install flash-attn==2.3.0; fi"
     - "mkdir -p vocoder"
     - "wget https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/vocoder/code_hifigan/mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj/g_00500000 -P vocoder/"
     - "wget https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/vocoder/code_hifigan/mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj/config.json -P vocoder/"
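For local setups without wget, a small Python equivalent of the two vocoder download steps (URLs copied verbatim from the run section above) might be:

```python
import os
import urllib.request

# Fetch the HiFi-GAN vocoder checkpoint and config that cog.yaml
# downloads with wget, into the same vocoder/ directory.
BASE = ("https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/vocoder/"
        "code_hifigan/mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj")
os.makedirs("vocoder", exist_ok=True)
for name in ("g_00500000", "config.json"):
    urllib.request.urlretrieve(f"{BASE}/{name}", os.path.join("vocoder", name))
```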
pyproject.toml
CHANGED
@@ -16,15 +16,15 @@ readme = "README.md"
 requires-python = ">=3.10"
 dependencies = [
     "torch>=2.0.0",
+    "numpy>=1.24.0",
     "transformers>=4.34.0",
     "accelerate>=0.21.0",
     "gradio>=3.50.2",
     "fastapi>=0.104.0",
     "uvicorn>=0.23.2",
     "pydantic>=2.3.0",
-    "whisper>=0.0.1",
-    "numpy>=1.24.0",
+    "openai-whisper>=0.0.1",
     "tqdm>=4.66.1",
-    "flash-attn>=2.3.0",
+    "requests>=2.31.0",
     "fairseq>=0.12.2",
 ]
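The `whisper` → `openai-whisper` rename matters: the PyPI package named `whisper` is an unrelated time-series database library, while `openai-whisper` is the speech model, though it is still imported under the name `whisper`. A quick sanity check (assumes a local audio file, here hypothetically `sample.wav`):

```python
# openai-whisper installs the "whisper" import name; this import fails
# to provide load_model if the unrelated PyPI "whisper" package is installed.
import whisper

model = whisper.load_model("tiny")  # smallest checkpoint, quick to download
print(model.transcribe("sample.wav")["text"])  # assumes a local sample.wav
```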
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
 torch>=2.0.0
+numpy>=1.24.0
 transformers>=4.34.0
 accelerate>=0.21.0
 gradio>=3.50.2
@@ -6,8 +7,7 @@ fastapi>=0.104.0
 uvicorn>=0.23.2
 pydantic>=2.3.0
 openai-whisper>=0.0.1
-numpy>=1.24.0
 tqdm>=4.66.1
+requests>=2.31.0
 git+https://github.com/pytorch/fairseq.git
-flash-attn>=2.3.0
-requests>=2.31.0
+flash-attn>=2.3.0; platform_system != "Darwin" or platform_machine != "arm64"
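The trailing condition on the flash-attn line is a standard PEP 508 environment marker: pip skips the requirement when it evaluates false, which here means Apple Silicon macOS. One way to check how the marker resolves on the current machine, using the `packaging` library:

```python
from packaging.markers import Marker

# The same marker string used in requirements.txt above.
marker = Marker('platform_system != "Darwin" or platform_machine != "arm64"')
print(marker.evaluate())  # False on Apple Silicon macOS, True elsewhere
```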