Upload 4 files
- README.md +12 -12
- app.py +316 -311
- pre-requirements.txt +1 -0
- requirements.txt +12 -10
README.md
CHANGED
@@ -1,13 +1,13 @@
  ---
  title: MIKU TTS
  emoji: ⚡
  colorFrom: blue
  colorTo: blue
  sdk: gradio
- sdk_version:
+ sdk_version: 5.16.1
  app_file: app.py
  pinned: false
  duplicated_from: litagin/rvc_okiba_TTS
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
@@ -1,311 +1,316 @@
+ import spaces  # in windows env, delete related to "spaces"
+ @spaces.GPU
+ def gpu():
+     pass
+
+ import asyncio
+ import datetime
+ import logging
+ import os
+ import time
+ import traceback
+
+ import edge_tts
+ import gradio as gr
+ import librosa
+ import torch
+ from fairseq import checkpoint_utils
+ from huggingface_hub import snapshot_download
+
+
+ from config import Config
+ from lib.infer_pack.models import (
+     SynthesizerTrnMs256NSFsid,
+     SynthesizerTrnMs256NSFsid_nono,
+     SynthesizerTrnMs768NSFsid,
+     SynthesizerTrnMs768NSFsid_nono,
+ )
+ from rmvpe import RMVPE
+ from vc_infer_pipeline import VC
+
+ logging.getLogger("fairseq").setLevel(logging.WARNING)
+ logging.getLogger("numba").setLevel(logging.WARNING)
+ logging.getLogger("markdown_it").setLevel(logging.WARNING)
+ logging.getLogger("urllib3").setLevel(logging.WARNING)
+ logging.getLogger("matplotlib").setLevel(logging.WARNING)
+
+ limitation = os.getenv("SYSTEM") == "spaces"
+
+ config = Config()
+
+ # Edge TTS
+ edge_output_filename = "edge_output.mp3"
+ tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
+ tts_voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
+
+ # RVC models
+ model_root = snapshot_download(repo_id="NoCrypt/miku_RVC", token=os.getenv("TOKEN", None))
+ models = [d for d in os.listdir(model_root) if os.path.isdir(f"{model_root}/{d}")]
+ models.sort()
+
+
+ def model_data(model_name):
+     # global n_spk, tgt_sr, net_g, vc, cpt, version, index_file
+     pth_path = [
+         f"{model_root}/{model_name}/{f}"
+         for f in os.listdir(f"{model_root}/{model_name}")
+         if f.endswith(".pth")
+     ][0]
+     print(f"Loading {pth_path}")
+     cpt = torch.load(pth_path, map_location="cpu")
+     tgt_sr = cpt["config"][-1]
+     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+     if_f0 = cpt.get("f0", 1)
+     version = cpt.get("version", "v1")
+     if version == "v1":
+         if if_f0 == 1:
+             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+         else:
+             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+     elif version == "v2":
+         if if_f0 == 1:
+             net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+         else:
+             net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+     else:
+         raise ValueError("Unknown version")
+     del net_g.enc_q
+     net_g.load_state_dict(cpt["weight"], strict=False)
+     print("Model loaded")
+     net_g.eval().to(config.device)
+     if config.is_half:
+         net_g = net_g.half()
+     else:
+         net_g = net_g.float()
+     vc = VC(tgt_sr, config)
+     # n_spk = cpt["config"][-3]
+
+     index_files = [
+         f"{model_root}/{model_name}/{f}"
+         for f in os.listdir(f"{model_root}/{model_name}")
+         if f.endswith(".index")
+     ]
+     if len(index_files) == 0:
+         print("No index file found")
+         index_file = ""
+     else:
+         index_file = index_files[0]
+         print(f"Index file found: {index_file}")
+
+     return tgt_sr, net_g, vc, version, index_file, if_f0
+
+
+ def load_hubert():
+     # global hubert_model
+     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+         ["hubert_base.pt"],
+         suffix="",
+     )
+     hubert_model = models[0]
+     hubert_model = hubert_model.to(config.device)
+     if config.is_half:
+         hubert_model = hubert_model.half()
+     else:
+         hubert_model = hubert_model.float()
+     return hubert_model.eval()
+
+
+ def tts(
+     model_name,
+     speed,
+     tts_text,
+     tts_voice,
+     f0_up_key,
+     f0_method,
+     index_rate,
+     protect,
+     filter_radius=3,
+     resample_sr=0,
+     rms_mix_rate=0.25,
+ ):
+     print("------------------")
+     print(datetime.datetime.now())
+     print("tts_text:")
+     print(tts_text)
+     print(f"tts_voice: {tts_voice}, speed: {speed}")
+     print(f"Model name: {model_name}")
+     print(f"F0: {f0_method}, Key: {f0_up_key}, Index: {index_rate}, Protect: {protect}")
+     try:
+         if limitation and len(tts_text) > 1000:
+             print("Error: Text too long")
+             return (
+                 f"Text characters should be at most 1000 in this huggingface space, but got {len(tts_text)} characters.",
+                 None,
+                 None,
+             )
+         t0 = time.time()
+         if speed >= 0:
+             speed_str = f"+{speed}%"
+         else:
+             speed_str = f"{speed}%"
+         asyncio.run(
+             edge_tts.Communicate(
+                 tts_text, "-".join(tts_voice.split("-")[:-1]), rate=speed_str
+             ).save(edge_output_filename)
+         )
+         t1 = time.time()
+         edge_time = t1 - t0
+         audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True)
+         duration = len(audio) / sr
+         print(f"Audio duration: {duration}s")
+         if limitation and duration >= 200:
+             print("Error: Audio too long")
+             return (
+                 f"Audio should be less than 200 seconds in this huggingface space, but got {duration}s.",
+                 edge_output_filename,
+                 None,
+             )
+         f0_up_key = int(f0_up_key)
+
+         tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name)
+         if f0_method == "rmvpe":
+             vc.model_rmvpe = rmvpe_model
+         times = [0, 0, 0]
+         audio_opt = vc.pipeline(
+             hubert_model,
+             net_g,
+             0,
+             audio,
+             edge_output_filename,
+             times,
+             f0_up_key,
+             f0_method,
+             index_file,
+             # file_big_npy,
+             index_rate,
+             if_f0,
+             filter_radius,
+             tgt_sr,
+             resample_sr,
+             rms_mix_rate,
+             version,
+             protect,
+             None,
+         )
+         if tgt_sr != resample_sr >= 16000:
+             tgt_sr = resample_sr
+         info = f"Success. Time: edge-tts: {edge_time}s, npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
+         print(info)
+         return (
+             info,
+             edge_output_filename,
+             (tgt_sr, audio_opt),
+         )
+     except EOFError:
+         info = (
+             "It seems that the edge-tts output is not valid. "
+             "This may occur when the input text and the speaker do not match. "
+             "For example, maybe you entered Japanese (without alphabets) text but chose non-Japanese speaker?"
+         )
+         print(info)
+         return info, None, None
+     except:
+         info = traceback.format_exc()
+         print(info)
+         return info, None, None
+
+
+ print("Loading hubert model...")
+ hubert_model = load_hubert()
+ print("Hubert model loaded.")
+
+ print("Loading rmvpe model...")
+ rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device)
+ print("rmvpe model loaded.")
+
+ initial_md = """
+ 
+ """
+
+ app = gr.Blocks(theme='NoCrypt/miku')
+ with app:
+     gr.Markdown(initial_md)
+     with gr.Row():
+         with gr.Column():
+             model_name = gr.Dropdown(
+                 label="Model",
+                 choices=models,
+                 value=models[0],
+             )
+             f0_key_up = gr.Number(
+                 label="Tune",
+                 value=6,
+             )
+         with gr.Column():
+             f0_method = gr.Radio(
+                 label="Pitch extraction method (pm: very fast, low quality, rmvpe: a little slow, high quality)",
+                 choices=["pm", "rmvpe"],  # harvest and crepe is too slow
+                 value="rmvpe",
+                 interactive=True,
+             )
+             index_rate = gr.Slider(
+                 minimum=0,
+                 maximum=1,
+                 label="Index rate",
+                 value=1,
+                 interactive=True,
+             )
+             protect0 = gr.Slider(
+                 minimum=0,
+                 maximum=0.5,
+                 label="Protect",
+                 value=0.33,
+                 step=0.01,
+                 interactive=True,
+             )
+     with gr.Row():
+         with gr.Column():
+             tts_voice = gr.Dropdown(
+                 label="Edge-tts speaker (format: language-Country-Name-Gender), make sure the gender matches the model",
+                 choices=tts_voices,
+                 allow_custom_value=False,
+                 value="ja-JP-NanamiNeural-Female",
+             )
+             speed = gr.Slider(
+                 minimum=-100,
+                 maximum=100,
+                 label="Speech speed (%)",
+                 value=0,
+                 step=10,
+                 interactive=True,
+             )
+             tts_text = gr.Textbox(label="Input Text", value="こんにちは、私の名前は初音ミクです!")
+         with gr.Column():
+             but0 = gr.Button("Convert", variant="primary")
+             info_text = gr.Textbox(label="Output info")
+         with gr.Column():
+             with gr.Accordion("Edge Voice", open=False):
+                 edge_tts_output = gr.Audio(label="Edge Voice", type="filepath")
+             tts_output = gr.Audio(label="Result")
+         but0.click(
+             tts,
+             [
+                 model_name,
+                 speed,
+                 tts_text,
+                 tts_voice,
+                 f0_key_up,
+                 f0_method,
+                 index_rate,
+                 protect0,
+             ],
+             [info_text, edge_tts_output, tts_output],
+         )
+     with gr.Row():
+         examples = gr.Examples(
+             examples_per_page=100,
+             examples=[
+                 ["こんにちは、私の名前は初音ミクです!", "ja-JP-NanamiNeural-Female", 6],
+                 ["Hello there. My name is Hatsune Miku!", "en-CA-ClaraNeural-Female", 6],
+                 ["Halo. Nama saya Hatsune Miku!", "id-ID-GadisNeural-Female", 4],
+                 ["Halo. Jenengku Hatsune Miku!", "jv-ID-SitiNeural-Female", 10],
+             ],
+             inputs=[tts_text, tts_voice, f0_key_up],
+         )
+
+ app.launch(ssr_mode=False)
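For reference, the edge-tts step inside tts() can be exercised on its own. The snippet below is a minimal sketch (not part of this commit) that assumes only the edge_tts package and network access; it mirrors how app.py strips the trailing "-Gender" suffix from the selected voice name and builds the rate string before synthesis:

import asyncio
import edge_tts

text = "こんにちは、私の名前は初音ミクです!"
voice = "ja-JP-NanamiNeural-Female"           # UI value: language-Country-Name-Gender
short_name = "-".join(voice.split("-")[:-1])  # edge-tts expects "ja-JP-NanamiNeural"
# rate uses the same "+N%" / "-N%" format that app.py builds from the speed slider
asyncio.run(edge_tts.Communicate(text, short_name, rate="+0%").save("edge_output.mp3"))

The resulting edge_output.mp3 is what app.py then loads at 16 kHz and feeds through the RVC pipeline.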
pre-requirements.txt
ADDED
@@ -0,0 +1 @@
+ pip==24.0
requirements.txt
CHANGED
@@ -1,10 +1,12 @@
+ torch<2.6
+ edge_tts>=6.1.7
+ fairseq>=0.12.2
+ faiss_cpu>=1.7.4
+ #gradio==3.38.0
+ librosa>=0.9.1
+ numpy==1.23.5
+ praat-parselmouth>=0.4.3
+ pyworld>=0.3.4
+ torchcrepe>=0.0.20
+ huggingface_hub
+ # https://github.com/facebookresearch/fairseq/issues/5511
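Note on the new pre-requirements.txt: on Hugging Face Spaces, pre-requirements.txt is installed before requirements.txt, so the pip==24.0 pin takes effect before fairseq is built. This appears to work around the fairseq installation failure tracked in the issue linked above, where newer pip releases reject fairseq's package metadata. A local setup would follow the same order, installing pre-requirements.txt first and requirements.txt second.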