import spaces
from kokoro import KModel, KPipeline
import gradio as gr
import os
import random
import torch
from urllib.parse import quote

# Build the static frontend bundle at startup; os.system returns the shell's
# exit status, which is printed to the Space logs.
print(os.system("""
cd front;
npm ci;
npm run build;
cd ..;
"""))

CHAR_LIMIT = 5000  # Maximum number of input characters accepted per request

SPACE_ID = os.environ.get('SPACE_ID', '')  # Set automatically on Hugging Face Spaces; default '' keeps quote() below safe
LLM_ENDPOINT = os.environ.get('LLM_ENDPOINT', 'null')

CUDA_AVAILABLE = torch.cuda.is_available()
# Always keep a CPU model as a fallback; add a GPU copy only when CUDA is available.
models = {gpu: KModel().to('cuda' if gpu else 'cpu').eval() for gpu in [False] + ([True] if CUDA_AVAILABLE else [])}
# One G2P pipeline per language code: 'a' = American English, 'b' = British English.
pipelines = {lang_code: KPipeline(lang_code=lang_code, model=False) for lang_code in 'ab'}
# Pin the pronunciation of "kokoro" in each lexicon.
pipelines['a'].g2p.lexicon.golds['kokoro'] = 'kˈOkəɹO'
pipelines['b'].g2p.lexicon.golds['kokoro'] = 'kˈQkəɹQ'
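# (Hypothetical sketch) further fixed pronunciations can be registered the same
# way; the word and phoneme string below are illustrative only, not part of the model:
# pipelines['a'].g2p.lexicon.golds['acme'] = 'ˈækmi'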

# Let Gradio serve the built frontend from ./front/dist (used by the iframe in `head` below).
gr.set_static_paths(paths=["./front/dist"])

# ZeroGPU: borrow a GPU for up to 30 seconds per call; CPU inference remains the fallback.
@spaces.GPU(duration=30)
def forward_gpu(ps, ref_s, speed):
    return models[True](ps, ref_s, speed)

# Synthesize only the first segment of `text` (truncated to CHAR_LIMIT) and
# return (sample_rate, waveform) plus the phoneme tokens that were used.
def generate_first(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
    text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
    pipeline = pipelines[voice[0]]
    pack = pipeline.load_voice(voice)
    use_gpu = use_gpu and CUDA_AVAILABLE
    for _, ps, _ in pipeline(text, voice, speed):
        ref_s = pack[len(ps)-1]
        try:
            if use_gpu:
                audio = forward_gpu(ps, ref_s, speed)
            else:
                audio = models[False](ps, ref_s, speed)
        except gr.exceptions.Error as e:
            if use_gpu:
                gr.Warning(str(e))
                gr.Info('Retrying with CPU. To avoid this error, change Hardware to CPU.')
                audio = models[False](ps, ref_s, speed)
            else:
                raise gr.Error(e)
        return (24000, audio.numpy()), ps
    return None, ''

# Arena API
def predict(text, voice='af_heart', speed=1):
    return generate_first(text, voice, speed, use_gpu=False)[0]

def tokenize_first(text, voice='af_heart'):
    pipeline = pipelines[voice[0]]
    for _, ps, _ in pipeline(text, voice):
        return ps
    return ''

# Stream the full text segment by segment, yielding (sample_rate, waveform) tuples.
def generate_all(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
    text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
    pipeline = pipelines[voice[0]]
    pack = pipeline.load_voice(voice)
    use_gpu = use_gpu and CUDA_AVAILABLE
    first = True
    for _, ps, _ in pipeline(text, voice, speed):
        ref_s = pack[len(ps)-1]
        try:
            if use_gpu:
                audio = forward_gpu(ps, ref_s, speed)
            else:
                audio = models[False](ps, ref_s, speed)
        except gr.exceptions.Error as e:
            if use_gpu:
                gr.Warning(str(e))
                gr.Info('Switching to CPU')
                audio = models[False](ps, ref_s, speed)
            else:
                raise gr.Error(e)
        yield 24000, audio.numpy()
        if first:
            first = False
            yield 24000, torch.zeros(1).numpy()

CHOICES = {
'🇺🇸 🚺 Heart ❤️': 'af_heart',
'🇺🇸 🚺 Bella 🔥': 'af_bella',
'🇺🇸 🚺 Nicole 🎧': 'af_nicole',
'🇺🇸 🚺 Aoede': 'af_aoede',
'🇺🇸 🚺 Kore': 'af_kore',
'🇺🇸 🚺 Sarah': 'af_sarah',
'🇺🇸 🚺 Nova': 'af_nova',
'🇺🇸 🚺 Sky': 'af_sky',
'🇺🇸 🚺 Alloy': 'af_alloy',
'🇺🇸 🚺 Jessica': 'af_jessica',
'🇺🇸 🚺 River': 'af_river',
'🇺🇸 🚹 Michael': 'am_michael',
'🇺🇸 🚹 Fenrir': 'am_fenrir',
'🇺🇸 🚹 Puck': 'am_puck',
'🇺🇸 🚹 Echo': 'am_echo',
'🇺🇸 🚹 Eric': 'am_eric',
'🇺🇸 🚹 Liam': 'am_liam',
'🇺🇸 🚹 Onyx': 'am_onyx',
'🇺🇸 🚹 Santa': 'am_santa',
'🇺🇸 🚹 Adam': 'am_adam',
'🇬🇧 🚺 Emma': 'bf_emma',
'🇬🇧 🚺 Isabella': 'bf_isabella',
'🇬🇧 🚺 Alice': 'bf_alice',
'🇬🇧 🚺 Lily': 'bf_lily',
'🇬🇧 🚹 George': 'bm_george',
'🇬🇧 🚹 Fable': 'bm_fable',
'🇬🇧 🚹 Lewis': 'bm_lewis',
'🇬🇧 🚹 Daniel': 'bm_daniel',
}
# Pre-load every voice pack so the first request for each voice doesn't stall.
for v in CHOICES.values():
    pipelines[v[0]].load_voice(v)

TOKEN_NOTE = '''
💡 Customize pronunciation with Markdown link syntax and /slashes/ like `[Kokoro](/kˈOkəɹO/)`

💬 To adjust intonation, try punctuation `;:,.!?—…"()“”` or stress `ˈ` and `ˌ`

⬇️ Lower stress `[1 level](-1)` or `[2 levels](-2)`

⬆️ Raise stress 1 level `[or](+2)` 2 levels (only works on less stressed, usually short words)
'''

with gr.Blocks() as generate_tab:
    out_audio = gr.Audio(label='Output Audio', interactive=False, streaming=False, autoplay=True)
    generate_btn = gr.Button('Generate', variant='primary')
    with gr.Accordion('Output Tokens', open=True):
        out_ps = gr.Textbox(interactive=False, show_label=False, info='Tokens used to generate the audio, up to 510 context length.')
        tokenize_btn = gr.Button('Tokenize', variant='secondary')
        gr.Markdown(TOKEN_NOTE)
        predict_btn = gr.Button('Predict', variant='secondary', visible=False)

STREAM_NOTE = ['⚠️ There is an unknown Gradio bug that might yield no audio the first time you click `Stream`.']
if CHAR_LIMIT is not None:
    STREAM_NOTE.append(f'✂️ Each stream is capped at {CHAR_LIMIT} characters.')
    STREAM_NOTE.append('🚀 Want more characters? You can [use Kokoro directly](https://huggingface.co/hexgrad/Kokoro-82M#usage) or duplicate this space:')
STREAM_NOTE = '\n\n'.join(STREAM_NOTE)

with gr.Blocks() as stream_tab:
    out_stream = gr.Audio(label='Output Audio Stream', interactive=False, streaming=True, autoplay=True)
    with gr.Row():
        stream_btn = gr.Button('Stream', variant='primary')
        stop_btn = gr.Button('Stop', variant='stop')
    with gr.Accordion('Note', open=True):
        gr.Markdown(STREAM_NOTE)
        gr.DuplicateButton()

API_NAME = 'tts'


# Unless debug mode is requested (?debug=1 or a 'debug' localStorage key), overlay the custom frontend built above in a full-screen iframe.
head = f'''
<script>
    document.addEventListener('DOMContentLoaded', () => {{
        console.log('DOM content loaded');
        if (!localStorage.getItem('debug') && !window.location.href.match(/debug=1/)) {{
            console.log('Attaching frontend app');
            const frontendApp = document.createElement('iframe');
            frontendApp.src = '/gradio_api/file=./front/dist/index.html?SPACE_ID={quote(SPACE_ID)}&LLM_ENDPOINT={quote(LLM_ENDPOINT)}';
            frontendApp.style = 'position: fixed; top: 0; left: 0; width: 100%; height: 100%; border: none; z-index: 999999;';
            document.body.appendChild(frontendApp);
        }}
    }});
</script>
'''

with gr.Blocks(head=head) as app:
    with gr.Row():
        with gr.Column():
            text = gr.Textbox(label='Input Text', info=f"Up to ~500 characters per Generate, or {'∞' if CHAR_LIMIT is None else CHAR_LIMIT} characters per Stream")
            voice = gr.Dropdown(list(CHOICES.items()), value='af_heart', label='Voice', info='Quality and availability vary by language')
            speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='Speed')
        with gr.Column():
            gr.TabbedInterface([generate_tab, stream_tab], ['Generate', 'Stream'])
    generate_btn.click(fn=generate_first, inputs=[text, voice, speed], outputs=[out_audio, out_ps], api_name=API_NAME)
    tokenize_btn.click(fn=tokenize_first, inputs=[text, voice], outputs=[out_ps], api_name=API_NAME)
    stream_event = stream_btn.click(fn=generate_all, inputs=[text, voice, speed], outputs=[out_stream], api_name=API_NAME)
    stop_btn.click(fn=None, cancels=stream_event)
    predict_btn.click(fn=predict, inputs=[text, voice, speed], outputs=[out_audio], api_name=API_NAME)

if __name__ == '__main__':
    app.queue(api_open=True).launch(show_api=True, ssr_mode=True)
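
# A minimal client-side sketch (not executed here) for calling the `/tts` route
# exposed above. It assumes the app is reachable at the given URL and that the
# `gradio_client` package is installed; the first event registered with
# api_name='tts' (generate_first) answers this route.
#
#   from gradio_client import Client
#   client = Client('http://127.0.0.1:7860/')
#   audio_path, tokens = client.predict('Hello from Kokoro!', 'af_heart', 1, api_name='/tts')
#   # audio_path is a local file path to the generated audio; tokens is the phoneme string.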