Mira1sen commited on
Commit
1333844
·
verified ·
1 Parent(s): f2f6a2b

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
- title: Gradio
3
- app_file: tts_gradio.py
4
  sdk: gradio
5
  sdk_version: 4.36.1
6
  ---
 
1
  ---
2
+ title: gradio
3
+ app_file: gradio_demo.py
4
  sdk: gradio
5
  sdk_version: 4.36.1
6
  ---
__pycache__/tts_gradio.cpython-312.pyc ADDED
Binary file (7 kB). View file
 
test99.mp3 CHANGED
Binary files a/test99.mp3 and b/test99.mp3 differ
 
tts_gradio.py CHANGED
@@ -9,19 +9,36 @@ import json
9
  import os
10
  import re
11
  import tempfile
12
- import librosa
13
- import numpy as np
14
  # import torch
15
  # from torch import no_grad, LongTensor
16
  # import commons
17
  import gradio as gr
18
- import gradio.utils as gr_utils
19
- import gradio.processing_utils as gr_processing_utils
20
 
21
- all_example = "my voice is my passport verify me."
22
 
23
  microsoft_model_list = [
24
- "en-US-AvaMultilingualNeural"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  ]
26
 
27
  openai_model_list = [
@@ -139,8 +156,8 @@ def openai(text, name):
139
 
140
  json_data = {
141
  'model': 'tts-1-hd',
142
- 'input': f'{text}',
143
- 'voice': f'{name}',
144
  }
145
 
146
  response = requests.post('https://api.openai.com/v1/audio/speech', headers=headers, json=json_data)
@@ -164,7 +181,7 @@ def elevenlabs(text,name):
164
  }
165
 
166
  data = {
167
- "text": f"{text}",
168
  "model_id": "eleven_monolingual_v1",
169
  "voice_settings": {
170
  "stability": 0.5,
@@ -204,7 +221,7 @@ def microsoft(text, name, style="Neural"):
204
  headers=headers,
205
  data=data,
206
  )
207
- return "Success", "sss"
208
 
209
  if __name__ == '__main__':
210
  parser = argparse.ArgumentParser()
@@ -216,7 +233,7 @@ if __name__ == '__main__':
216
 
217
  app = gr.Blocks()
218
  with app:
219
- gr.Markdown("## Japanese TTS Demo")
220
  with gr.Tabs():
221
 
222
  with gr.TabItem("11Labs"):
@@ -246,7 +263,7 @@ if __name__ == '__main__':
246
  tts_submit.click(openai, [tts_input1, tts_input2],
247
  [tts_output1, tts_output2])
248
 
249
- app.queue(max_size=10)
250
  app.launch(share=True)
251
  # _, audio = openai(all_example,'alloy')
252
  # print(audio)
 
9
  import os
10
  import re
11
  import tempfile
12
+ # import librosa
13
+ # import numpy as np
14
  # import torch
15
  # from torch import no_grad, LongTensor
16
  # import commons
17
  import gradio as gr
18
+ # import gradio.utils as gr_utils
19
+ # import gradio.processing_utils as gr_processing_utils
20
 
21
+ all_example = "Today is a wonderful day to build something people love!"
22
 
23
  microsoft_model_list = [
24
+ "en-US-JennyMultilingualNeural",
25
+ "en-US-RyanMultilingualNeural",
26
+ "en-US-AndrewMultilingualNeural",
27
+ "en-US-AvaMultilingualNeural",
28
+ "en-US-BrianMultilingualNeural",
29
+ "en-US-EmmaMultilingualNeural",
30
+ "en-US-AlloyMultilingualNeural",
31
+ "en-US-EchoMultilingualNeural",
32
+ "en-US-FableMultilingualNeural",
33
+ "en-US-OnyxMultilingualNeural",
34
+ "en-US-NovaMultilingualNeural",
35
+ "en-US-ShimmerMultilingualNeural",
36
+ "en-US-AlloyMultilingualNeuralHD",
37
+ "en-US-EchoMultilingualNeuralHD",
38
+ "en-US-FableMultilingualNeuralHD",
39
+ "en-US-OnyxMultilingualNeuralHD",
40
+ "en-US-NovaMultilingualNeuralHD",
41
+ "en-US-ShimmerMultilingualNeuralHD"
42
  ]
43
 
44
  openai_model_list = [
 
156
 
157
  json_data = {
158
  'model': 'tts-1-hd',
159
+ 'input': text,
160
+ 'voice': name,
161
  }
162
 
163
  response = requests.post('https://api.openai.com/v1/audio/speech', headers=headers, json=json_data)
 
181
  }
182
 
183
  data = {
184
+ "text": text,
185
  "model_id": "eleven_monolingual_v1",
186
  "voice_settings": {
187
  "stability": 0.5,
 
221
  headers=headers,
222
  data=data,
223
  )
224
+ return "Success", response
225
 
226
  if __name__ == '__main__':
227
  parser = argparse.ArgumentParser()
 
233
 
234
  app = gr.Blocks()
235
  with app:
236
+ gr.Markdown("## English TTS Demo")
237
  with gr.Tabs():
238
 
239
  with gr.TabItem("11Labs"):
 
263
  tts_submit.click(openai, [tts_input1, tts_input2],
264
  [tts_output1, tts_output2])
265
 
266
+ # app.queue(max_size=10)
267
  app.launch(share=True)
268
  # _, audio = openai(all_example,'alloy')
269
  # print(audio)