John6666 committed
Commit 36d4797 · verified · 1 Parent(s): 28cf14c

Upload 4 files

Files changed (4)
  1. README.md +12 -12
  2. app.py +316 -311
  3. pre-requirements.txt +1 -0
  4. requirements.txt +12 -10
README.md CHANGED
@@ -1,13 +1,13 @@
- ---
- title: MIKU TTS
- emoji: ⚡
- colorFrom: blue
- colorTo: blue
- sdk: gradio
- sdk_version: 4.39.0
- app_file: app.py
- pinned: false
- duplicated_from: litagin/rvc_okiba_TTS
- ---
-
+ ---
+ title: MIKU TTS
+ emoji: ⚡
+ colorFrom: blue
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 5.16.1
+ app_file: app.py
+ pinned: false
+ duplicated_from: litagin/rvc_okiba_TTS
+ ---
+
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
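Note: the front matter above is the Spaces config header; bumping sdk_version from 4.39.0 to 5.16.1 moves the Space from Gradio 4 to Gradio 5. Gradio 5 enables server-side rendering on Spaces by default, which is presumably why app.py below switches to launch(ssr_mode=False). A minimal standalone sketch of that launch flag (not this Space's full app):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Hello, Miku!")

# ssr_mode is a Gradio 5 launch() parameter; False restores the
# client-side rendering behavior that Gradio 4 apps assume.
demo.launch(ssr_mode=False)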
app.py CHANGED
@@ -1,311 +1,316 @@
- import asyncio
- import datetime
- import logging
- import os
- import time
- import traceback
-
- import edge_tts
- import gradio as gr
- import librosa
- import torch
- from fairseq import checkpoint_utils
- from huggingface_hub import snapshot_download
-
-
- from config import Config
- from lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid,
-     SynthesizerTrnMs256NSFsid_nono,
-     SynthesizerTrnMs768NSFsid,
-     SynthesizerTrnMs768NSFsid_nono,
- )
- from rmvpe import RMVPE
- from vc_infer_pipeline import VC
-
- logging.getLogger("fairseq").setLevel(logging.WARNING)
- logging.getLogger("numba").setLevel(logging.WARNING)
- logging.getLogger("markdown_it").setLevel(logging.WARNING)
- logging.getLogger("urllib3").setLevel(logging.WARNING)
- logging.getLogger("matplotlib").setLevel(logging.WARNING)
-
- limitation = os.getenv("SYSTEM") == "spaces"
-
- config = Config()
-
- # Edge TTS
- edge_output_filename = "edge_output.mp3"
- tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
- tts_voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
-
- # RVC models
- model_root = snapshot_download(repo_id="NoCrypt/miku_RVC", token=os.environ["TOKEN"])
- models = [d for d in os.listdir(model_root) if os.path.isdir(f"{model_root}/{d}")]
- models.sort()
-
-
- def model_data(model_name):
-     # global n_spk, tgt_sr, net_g, vc, cpt, version, index_file
-     pth_path = [
-         f"{model_root}/{model_name}/{f}"
-         for f in os.listdir(f"{model_root}/{model_name}")
-         if f.endswith(".pth")
-     ][0]
-     print(f"Loading {pth_path}")
-     cpt = torch.load(pth_path, map_location="cpu")
-     tgt_sr = cpt["config"][-1]
-     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-     if_f0 = cpt.get("f0", 1)
-     version = cpt.get("version", "v1")
-     if version == "v1":
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
-         else:
-             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-     elif version == "v2":
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
-         else:
-             net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-     else:
-         raise ValueError("Unknown version")
-     del net_g.enc_q
-     net_g.load_state_dict(cpt["weight"], strict=False)
-     print("Model loaded")
-     net_g.eval().to(config.device)
-     if config.is_half:
-         net_g = net_g.half()
-     else:
-         net_g = net_g.float()
-     vc = VC(tgt_sr, config)
-     # n_spk = cpt["config"][-3]
-
-     index_files = [
-         f"{model_root}/{model_name}/{f}"
-         for f in os.listdir(f"{model_root}/{model_name}")
-         if f.endswith(".index")
-     ]
-     if len(index_files) == 0:
-         print("No index file found")
-         index_file = ""
-     else:
-         index_file = index_files[0]
-         print(f"Index file found: {index_file}")
-
-     return tgt_sr, net_g, vc, version, index_file, if_f0
-
-
- def load_hubert():
-     # global hubert_model
-     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(config.device)
-     if config.is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     return hubert_model.eval()
-
-
- def tts(
-     model_name,
-     speed,
-     tts_text,
-     tts_voice,
-     f0_up_key,
-     f0_method,
-     index_rate,
-     protect,
-     filter_radius=3,
-     resample_sr=0,
-     rms_mix_rate=0.25,
- ):
-     print("------------------")
-     print(datetime.datetime.now())
-     print("tts_text:")
-     print(tts_text)
-     print(f"tts_voice: {tts_voice}, speed: {speed}")
-     print(f"Model name: {model_name}")
-     print(f"F0: {f0_method}, Key: {f0_up_key}, Index: {index_rate}, Protect: {protect}")
-     try:
-         if limitation and len(tts_text) > 1000:
-             print("Error: Text too long")
-             return (
-                 f"Text characters should be at most 1000 in this huggingface space, but got {len(tts_text)} characters.",
-                 None,
-                 None,
-             )
-         t0 = time.time()
-         if speed >= 0:
-             speed_str = f"+{speed}%"
-         else:
-             speed_str = f"{speed}%"
-         asyncio.run(
-             edge_tts.Communicate(
-                 tts_text, "-".join(tts_voice.split("-")[:-1]), rate=speed_str
-             ).save(edge_output_filename)
-         )
-         t1 = time.time()
-         edge_time = t1 - t0
-         audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True)
-         duration = len(audio) / sr
-         print(f"Audio duration: {duration}s")
-         if limitation and duration >= 200:
-             print("Error: Audio too long")
-             return (
-                 f"Audio should be less than 200 seconds in this huggingface space, but got {duration}s.",
-                 edge_output_filename,
-                 None,
-             )
-         f0_up_key = int(f0_up_key)
-
-         tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name)
-         if f0_method == "rmvpe":
-             vc.model_rmvpe = rmvpe_model
-         times = [0, 0, 0]
-         audio_opt = vc.pipeline(
-             hubert_model,
-             net_g,
-             0,
-             audio,
-             edge_output_filename,
-             times,
-             f0_up_key,
-             f0_method,
-             index_file,
-             # file_big_npy,
-             index_rate,
-             if_f0,
-             filter_radius,
-             tgt_sr,
-             resample_sr,
-             rms_mix_rate,
-             version,
-             protect,
-             None,
-         )
-         if tgt_sr != resample_sr >= 16000:
-             tgt_sr = resample_sr
-         info = f"Success. Time: edge-tts: {edge_time}s, npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
-         print(info)
-         return (
-             info,
-             edge_output_filename,
-             (tgt_sr, audio_opt),
-         )
-     except EOFError:
-         info = (
-             "It seems that the edge-tts output is not valid. "
-             "This may occur when the input text and the speaker do not match. "
-             "For example, maybe you entered Japanese (without alphabets) text but chose non-Japanese speaker?"
-         )
-         print(info)
-         return info, None, None
-     except:
-         info = traceback.format_exc()
-         print(info)
-         return info, None, None
-
-
- print("Loading hubert model...")
- hubert_model = load_hubert()
- print("Hubert model loaded.")
-
- print("Loading rmvpe model...")
- rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device)
- print("rmvpe model loaded.")
-
- initial_md = """
- ![banner that says mikutts](https://huggingface.co/spaces/NoCrypt/mikuTTS/resolve/main/imgs/banner_mikutts.webp)
- """
-
- app = gr.Blocks(theme='NoCrypt/miku')
- with app:
-     gr.Markdown(initial_md)
-     with gr.Row():
-         with gr.Column():
-             model_name = gr.Dropdown(
-                 label="Model",
-                 choices=models,
-                 value=models[0],
-             )
-             f0_key_up = gr.Number(
-                 label="Tune",
-                 value=6,
-             )
-         with gr.Column():
-             f0_method = gr.Radio(
-                 label="Pitch extraction method (pm: very fast, low quality, rmvpe: a little slow, high quality)",
-                 choices=["pm", "rmvpe"],  # harvest and crepe is too slow
-                 value="rmvpe",
-                 interactive=True,
-             )
-             index_rate = gr.Slider(
-                 minimum=0,
-                 maximum=1,
-                 label="Index rate",
-                 value=1,
-                 interactive=True,
-             )
-             protect0 = gr.Slider(
-                 minimum=0,
-                 maximum=0.5,
-                 label="Protect",
-                 value=0.33,
-                 step=0.01,
-                 interactive=True,
-             )
-     with gr.Row():
-         with gr.Column():
-             tts_voice = gr.Dropdown(
-                 label="Edge-tts speaker (format: language-Country-Name-Gender), make sure the gender matches the model",
-                 choices=tts_voices,
-                 allow_custom_value=False,
-                 value="ja-JP-NanamiNeural-Female",
-             )
-             speed = gr.Slider(
-                 minimum=-100,
-                 maximum=100,
-                 label="Speech speed (%)",
-                 value=0,
-                 step=10,
-                 interactive=True,
-             )
-             tts_text = gr.Textbox(label="Input Text", value="こんにちは、私の名前は初音ミクです!")
-         with gr.Column():
-             but0 = gr.Button("Convert", variant="primary")
-             info_text = gr.Textbox(label="Output info")
-         with gr.Column():
-             with gr.Accordion("Edge Voice", open=False):
-                 edge_tts_output = gr.Audio(label="Edge Voice", type="filepath")
-             tts_output = gr.Audio(label="Result")
-     but0.click(
-         tts,
-         [
-             model_name,
-             speed,
-             tts_text,
-             tts_voice,
-             f0_key_up,
-             f0_method,
-             index_rate,
-             protect0,
-         ],
-         [info_text, edge_tts_output, tts_output],
-     )
-     with gr.Row():
-         examples = gr.Examples(
-             examples_per_page=100,
-             examples=[
-                 ["こんにちは、私の名前は初音ミクです!", "ja-JP-NanamiNeural-Female", 6],
-                 ["Hello there. My name is Hatsune Miku!", "en-CA-ClaraNeural-Female", 6],
-                 ["Halo. Nama saya Hatsune Miku!", "id-ID-GadisNeural-Female", 4],
-                 ["Halo. Jenengku Hatsune Miku!", "jv-ID-SitiNeural-Female", 10],
-             ],
-             inputs=[tts_text, tts_voice, f0_key_up],
-         )
-
- app.launch()
+ import spaces  # in windows env, delete related to "spaces"
+ @spaces.GPU
+ def gpu():
+     pass
+
+ import asyncio
+ import datetime
+ import logging
+ import os
+ import time
+ import traceback
+
+ import edge_tts
+ import gradio as gr
+ import librosa
+ import torch
+ from fairseq import checkpoint_utils
+ from huggingface_hub import snapshot_download
+
+
+ from config import Config
+ from lib.infer_pack.models import (
+     SynthesizerTrnMs256NSFsid,
+     SynthesizerTrnMs256NSFsid_nono,
+     SynthesizerTrnMs768NSFsid,
+     SynthesizerTrnMs768NSFsid_nono,
+ )
+ from rmvpe import RMVPE
+ from vc_infer_pipeline import VC
+
+ logging.getLogger("fairseq").setLevel(logging.WARNING)
+ logging.getLogger("numba").setLevel(logging.WARNING)
+ logging.getLogger("markdown_it").setLevel(logging.WARNING)
+ logging.getLogger("urllib3").setLevel(logging.WARNING)
+ logging.getLogger("matplotlib").setLevel(logging.WARNING)
+
+ limitation = os.getenv("SYSTEM") == "spaces"
+
+ config = Config()
+
+ # Edge TTS
+ edge_output_filename = "edge_output.mp3"
+ tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
+ tts_voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
+
+ # RVC models
+ model_root = snapshot_download(repo_id="NoCrypt/miku_RVC", token=os.getenv("TOKEN", None))
+ models = [d for d in os.listdir(model_root) if os.path.isdir(f"{model_root}/{d}")]
+ models.sort()
+
+
+ def model_data(model_name):
+     # global n_spk, tgt_sr, net_g, vc, cpt, version, index_file
+     pth_path = [
+         f"{model_root}/{model_name}/{f}"
+         for f in os.listdir(f"{model_root}/{model_name}")
+         if f.endswith(".pth")
+     ][0]
+     print(f"Loading {pth_path}")
+     cpt = torch.load(pth_path, map_location="cpu")
+     tgt_sr = cpt["config"][-1]
+     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+     if_f0 = cpt.get("f0", 1)
+     version = cpt.get("version", "v1")
+     if version == "v1":
+         if if_f0 == 1:
+             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+         else:
+             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+     elif version == "v2":
+         if if_f0 == 1:
+             net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+         else:
+             net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+     else:
+         raise ValueError("Unknown version")
+     del net_g.enc_q
+     net_g.load_state_dict(cpt["weight"], strict=False)
+     print("Model loaded")
+     net_g.eval().to(config.device)
+     if config.is_half:
+         net_g = net_g.half()
+     else:
+         net_g = net_g.float()
+     vc = VC(tgt_sr, config)
+     # n_spk = cpt["config"][-3]
+
+     index_files = [
+         f"{model_root}/{model_name}/{f}"
+         for f in os.listdir(f"{model_root}/{model_name}")
+         if f.endswith(".index")
+     ]
+     if len(index_files) == 0:
+         print("No index file found")
+         index_file = ""
+     else:
+         index_file = index_files[0]
+         print(f"Index file found: {index_file}")
+
+     return tgt_sr, net_g, vc, version, index_file, if_f0
+
+
+ def load_hubert():
+     # global hubert_model
+     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+         ["hubert_base.pt"],
+         suffix="",
+     )
+     hubert_model = models[0]
+     hubert_model = hubert_model.to(config.device)
+     if config.is_half:
+         hubert_model = hubert_model.half()
+     else:
+         hubert_model = hubert_model.float()
+     return hubert_model.eval()
+
+
+ def tts(
+     model_name,
+     speed,
+     tts_text,
+     tts_voice,
+     f0_up_key,
+     f0_method,
+     index_rate,
+     protect,
+     filter_radius=3,
+     resample_sr=0,
+     rms_mix_rate=0.25,
+ ):
+     print("------------------")
+     print(datetime.datetime.now())
+     print("tts_text:")
+     print(tts_text)
+     print(f"tts_voice: {tts_voice}, speed: {speed}")
+     print(f"Model name: {model_name}")
+     print(f"F0: {f0_method}, Key: {f0_up_key}, Index: {index_rate}, Protect: {protect}")
+     try:
+         if limitation and len(tts_text) > 1000:
+             print("Error: Text too long")
+             return (
+                 f"Text characters should be at most 1000 in this huggingface space, but got {len(tts_text)} characters.",
+                 None,
+                 None,
+             )
+         t0 = time.time()
+         if speed >= 0:
+             speed_str = f"+{speed}%"
+         else:
+             speed_str = f"{speed}%"
+         asyncio.run(
+             edge_tts.Communicate(
+                 tts_text, "-".join(tts_voice.split("-")[:-1]), rate=speed_str
+             ).save(edge_output_filename)
+         )
+         t1 = time.time()
+         edge_time = t1 - t0
+         audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True)
+         duration = len(audio) / sr
+         print(f"Audio duration: {duration}s")
+         if limitation and duration >= 200:
+             print("Error: Audio too long")
+             return (
+                 f"Audio should be less than 200 seconds in this huggingface space, but got {duration}s.",
+                 edge_output_filename,
+                 None,
+             )
+         f0_up_key = int(f0_up_key)
+
+         tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name)
+         if f0_method == "rmvpe":
+             vc.model_rmvpe = rmvpe_model
+         times = [0, 0, 0]
+         audio_opt = vc.pipeline(
+             hubert_model,
+             net_g,
+             0,
+             audio,
+             edge_output_filename,
+             times,
+             f0_up_key,
+             f0_method,
+             index_file,
+             # file_big_npy,
+             index_rate,
+             if_f0,
+             filter_radius,
+             tgt_sr,
+             resample_sr,
+             rms_mix_rate,
+             version,
+             protect,
+             None,
+         )
+         if tgt_sr != resample_sr >= 16000:
+             tgt_sr = resample_sr
+         info = f"Success. Time: edge-tts: {edge_time}s, npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
+         print(info)
+         return (
+             info,
+             edge_output_filename,
+             (tgt_sr, audio_opt),
+         )
+     except EOFError:
+         info = (
+             "It seems that the edge-tts output is not valid. "
+             "This may occur when the input text and the speaker do not match. "
+             "For example, maybe you entered Japanese (without alphabets) text but chose non-Japanese speaker?"
+         )
+         print(info)
+         return info, None, None
+     except:
+         info = traceback.format_exc()
+         print(info)
+         return info, None, None
+
+
+ print("Loading hubert model...")
+ hubert_model = load_hubert()
+ print("Hubert model loaded.")
+
+ print("Loading rmvpe model...")
+ rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device)
+ print("rmvpe model loaded.")
+
+ initial_md = """
+ ![banner that says mikutts](https://huggingface.co/spaces/NoCrypt/mikuTTS/resolve/main/imgs/banner_mikutts.webp)
+ """
+
+ app = gr.Blocks(theme='NoCrypt/miku')
+ with app:
+     gr.Markdown(initial_md)
+     with gr.Row():
+         with gr.Column():
+             model_name = gr.Dropdown(
+                 label="Model",
+                 choices=models,
+                 value=models[0],
+             )
+             f0_key_up = gr.Number(
+                 label="Tune",
+                 value=6,
+             )
+         with gr.Column():
+             f0_method = gr.Radio(
+                 label="Pitch extraction method (pm: very fast, low quality, rmvpe: a little slow, high quality)",
+                 choices=["pm", "rmvpe"],  # harvest and crepe is too slow
+                 value="rmvpe",
+                 interactive=True,
+             )
+             index_rate = gr.Slider(
+                 minimum=0,
+                 maximum=1,
+                 label="Index rate",
+                 value=1,
+                 interactive=True,
+             )
+             protect0 = gr.Slider(
+                 minimum=0,
+                 maximum=0.5,
+                 label="Protect",
+                 value=0.33,
+                 step=0.01,
+                 interactive=True,
+             )
+     with gr.Row():
+         with gr.Column():
+             tts_voice = gr.Dropdown(
+                 label="Edge-tts speaker (format: language-Country-Name-Gender), make sure the gender matches the model",
+                 choices=tts_voices,
+                 allow_custom_value=False,
+                 value="ja-JP-NanamiNeural-Female",
+             )
+             speed = gr.Slider(
+                 minimum=-100,
+                 maximum=100,
+                 label="Speech speed (%)",
+                 value=0,
+                 step=10,
+                 interactive=True,
+             )
+             tts_text = gr.Textbox(label="Input Text", value="こんにちは、私の名前は初音ミクです!")
+         with gr.Column():
+             but0 = gr.Button("Convert", variant="primary")
+             info_text = gr.Textbox(label="Output info")
+         with gr.Column():
+             with gr.Accordion("Edge Voice", open=False):
+                 edge_tts_output = gr.Audio(label="Edge Voice", type="filepath")
+             tts_output = gr.Audio(label="Result")
+     but0.click(
+         tts,
+         [
+             model_name,
+             speed,
+             tts_text,
+             tts_voice,
+             f0_key_up,
+             f0_method,
+             index_rate,
+             protect0,
+         ],
+         [info_text, edge_tts_output, tts_output],
+     )
+     with gr.Row():
+         examples = gr.Examples(
+             examples_per_page=100,
+             examples=[
+                 ["こんにちは、私の名前は初音ミクです!", "ja-JP-NanamiNeural-Female", 6],
+                 ["Hello there. My name is Hatsune Miku!", "en-CA-ClaraNeural-Female", 6],
+                 ["Halo. Nama saya Hatsune Miku!", "id-ID-GadisNeural-Female", 4],
+                 ["Halo. Jenengku Hatsune Miku!", "jv-ID-SitiNeural-Female", 10],
+             ],
+             inputs=[tts_text, tts_voice, f0_key_up],
+         )
+
+ app.launch(ssr_mode=False)
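Note: besides the token lookup (os.environ["TOKEN"] → os.getenv("TOKEN", None)) and the launch flag, the only functional addition is the spaces block at the top. On ZeroGPU Spaces, the spaces package must be imported before anything that initializes CUDA, and a GPU is attached only while a function decorated with @spaces.GPU is running; this commit registers a no-op stub, apparently just to satisfy ZeroGPU's requirement that at least one decorated function exists. A minimal sketch of the usual pattern (names here are illustrative, not from this commit):

import spaces  # must be imported before torch touches CUDA
import torch

@spaces.GPU(duration=60)  # hypothetical: request a GPU for up to 60 s per call
def heavy_infer(x):
    # real work (e.g. vc.pipeline(...)) would run here with CUDA available
    return x

Undecorated code runs on CPU; decorating the actual inference entrypoint, rather than a pass-through stub, is the usage the ZeroGPU docs describe.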
pre-requirements.txt ADDED
@@ -0,0 +1 @@
+ pip==24.0
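Note: on Spaces, pre-requirements.txt is installed before requirements.txt, so this file pins the pip version used for the main install. pip==24.0 likely sidesteps the stricter dependency-metadata validation introduced in pip 24.1, which breaks installation of older fairseq dependency chains. A rough local equivalent of that install order (a sketch, not the Spaces builder itself):

import subprocess
import sys

# Install the pinned pip first, then resolve the app's requirements with it,
# mirroring the pre-requirements.txt -> requirements.txt order on Spaces.
subprocess.check_call([sys.executable, "-m", "pip", "install", "pip==24.0"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])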
requirements.txt CHANGED
@@ -1,10 +1,12 @@
- edge_tts==6.1.7
- fairseq==0.12.2
- faiss_cpu==1.7.4
- gradio==3.38.0
- librosa==0.9.1
- numpy==1.23.5
- praat-parselmouth==0.4.3
- pyworld==0.3.4
- torchcrepe==0.0.20
- huggingface_hub
+ torch<2.6
+ edge_tts>=6.1.7
+ fairseq>=0.12.2
+ faiss_cpu>=1.7.4
+ #gradio==3.38.0
+ librosa>=0.9.1
+ numpy==1.23.5
+ praat-parselmouth>=0.4.3
+ pyworld>=0.3.4
+ torchcrepe>=0.0.20
+ huggingface_hub
+ # https://github.com/facebookresearch/fairseq/issues/5511
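Note: the torch<2.6 pin and the linked fairseq issue go together: PyTorch 2.6 changed the default of torch.load to weights_only=True, which breaks loading pickled fairseq/RVC checkpoints such as hubert_base.pt and the .pth models above. Pinning torch below 2.6 avoids having to patch fairseq. The alternative workaround (a sketch; only for checkpoints you trust, since full unpickling can execute arbitrary code):

import torch

# Explicitly opt back into full unpickling on torch>=2.6.
cpt = torch.load("model.pth", map_location="cpu", weights_only=False)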