Olivier-Truong committed
Commit 8d4ef61
Parent(s): fab601a
Update app.py

app.py CHANGED
@@ -12,6 +12,11 @@ print(model_names)
 tts = TTS(m, gpu=False)
 tts.to("cpu") # no GPU or Amd
 #tts.to("cuda") # cuda only
+br_ = """
+<script>
+document.writeln("aaaa");
+</script>
+"""
 
 def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree):
     if agree == True:
@@ -77,7 +82,7 @@ def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree):
 
 title = "XTTS Glz's remake (Fonctional Text-2-Speech)"
 
-description = """
+description = f"""
 <a href="https://huggingface.co/coqui/XTTS-v1">XTTS</a> is a Voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip.
 <br/>
 XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible.
@@ -85,7 +90,7 @@ XTTS is built on previous research, like Tortoise, with additional architectural
 This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
 <br/>
 Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, where our open-source inference and training code lives.
-<br/>
+<br/>{br_}
 <p>For faster inference without waiting in the queue, you should duplicate this space and upgrade to GPU via the settings.
 <br/>
 <a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
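
The top of the first hunk keeps the model on CPU: the model is constructed with `gpu=False` and then moved with `tts.to("cpu")`, with the CUDA call left commented out. Below is a minimal sketch of that loading pattern using the Coqui TTS Python API; the model name, reference clip, and output path are illustrative assumptions rather than values taken from this diff.

# Minimal sketch of the CPU-only loading pattern from the hunk above,
# assuming the Coqui TTS Python API (TTS.api.TTS). Model id, reference
# clip, and output path are illustrative, not from the diff.
from TTS.api import TTS

model_name = "tts_models/multilingual/multi-dataset/xtts_v1"  # assumed model id
tts = TTS(model_name, gpu=False)  # do not initialize CUDA
tts.to("cpu")                     # keep inference on CPU (no GPU / AMD)

# Clone a voice from a short reference clip and write the result to disk.
tts.tts_to_file(
    text="Hello, this is a CPU-only synthesis test.",
    speaker_wav="reference.wav",   # hypothetical ~3-second reference clip
    language="en",
    file_path="output.wav",        # hypothetical output path
)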
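
The second change turns `description` into an f-string so the new `br_` snippet is interpolated into the HTML after the final `<br/>`. The sketch below shows how such a description string is typically handed to a Gradio interface; the `gr.Interface` call and placeholder function are assumptions for illustration and are not part of this diff.

# Sketch of how the interpolated description would typically be wired into a
# Gradio UI; the Interface call below is an assumption, not shown in this diff.
import gradio as gr

br_ = """
<script>
document.writeln("aaaa");
</script>
"""

# With the f-string, {br_} is substituted when the string is defined, so the
# snippet becomes part of the HTML passed as the description.
description = f"""
Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>.
<br/>{br_}
"""

demo = gr.Interface(
    fn=lambda text: text,   # placeholder; the real app uses predict()
    inputs=gr.Textbox(),
    outputs=gr.Textbox(),
    title="XTTS Glz's remake (Fonctional Text-2-Speech)",
    description=description,
)

if __name__ == "__main__":
    demo.launch()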