Update app.py
app.py CHANGED
@@ -1,6 +1,8 @@
 import re
+import os
 import ffmpy
 import asyncio
+import tarfile
 import edge_tts
 import subprocess
 import gradio as gr
@@ -9,6 +11,11 @@ from gradio_client import Client
 from http.client import RemoteDisconnected
 from list_dict import translates, speakers
 
+if not os.path.exists('pretrained_models'):
+    # Extract the tar.gz archive
+    with tarfile.open('2stems.tar.gz', 'r:gz') as tar_ref:
+        tar_ref.extractall('./pretrained_models/2stems')
+
 translate = translates
 tr = list(translate.keys())[9]
 language = translate[tr]
@@ -44,8 +51,8 @@ def left_justified(audio):
         raise gr.Error('No start sound detected!')
     return start_justified
 
-def time_verify():
-    audios = [
+def time_verify(vocals_audio, target_audio):
+    audios = [vocals_audio, target_audio]
     justified = []
     time_lists = []
 
@@ -134,7 +141,7 @@ def video_inputs(video, TR_LANGUAGE, LANGUAGE, SPEAKER):
         await communicate.save(text_to_speech)
     asyncio.run(amain())
 
-
+    r_time = time_verify(vocals_monorail, text_to_speech)
     ff = ffmpy.FFmpeg(
         inputs={
            text_to_speech: None
@@ -144,6 +151,7 @@ def video_inputs(video, TR_LANGUAGE, LANGUAGE, SPEAKER):
         }
     )
     ff.run()
+    j_time = time_verify(vocals_monorail, output_rate_audio)
     if j_time > 0:
         ff = ffmpy.FFmpeg(
             inputs={
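
time_verify now receives the two audio files to compare instead of starting from a hard-coded list. Its body is not part of this diff; the call sites later in the file (r_time and j_time, with j_time compared against 0) suggest it returns a signed offset between where sound starts in the two files. A minimal sketch of that reading, assuming pydub is available and that leading silence is what is measured (both assumptions, not code from this repository):

from pydub import AudioSegment
from pydub.silence import detect_leading_silence

def time_verify(vocals_audio, target_audio):
    # Hypothetical sketch: the real body is not shown in this diff.
    # Return the offset, in seconds, between where sound first starts in the
    # vocals track and in the generated speech (positive if the vocals start later).
    audios = [vocals_audio, target_audio]
    starts_ms = []
    for path in audios:
        sound = AudioSegment.from_file(path)
        starts_ms.append(detect_leading_silence(sound, silence_threshold=-40.0))
    return (starts_ms[0] - starts_ms[1]) / 1000.0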
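
The two new calls wire time_verify into the existing synthesis pipeline: r_time compares the (presumably separated) vocals_monorail track against the freshly synthesized text_to_speech file, and j_time re-checks against output_rate_audio, gating a further ffmpeg pass when it is positive. For orientation, the surrounding pattern in app.py is the usual edge-tts save wrapped in asyncio.run plus an ffmpy command; the file names, voice, and atempo option below are placeholders rather than values from the app:

import asyncio
import edge_tts
import ffmpy

async def amain():
    # Same shape as the amain()/asyncio.run() pair in the diff; the text and
    # voice here are placeholders.
    communicate = edge_tts.Communicate("Hello world", "en-US-AriaNeural")
    await communicate.save("speech.mp3")

asyncio.run(amain())

# ffmpy maps input paths to input options and output paths to output options.
ff = ffmpy.FFmpeg(
    global_options='-y',
    inputs={'speech.mp3': None},
    outputs={'speech_stretched.mp3': '-filter:a atempo=1.25'},
)
ff.run()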