Update TextGen/router.py
TextGen/router.py  CHANGED  +41 -22
@@ -169,9 +169,9 @@ async def generate_wav(message: VoiceMessage):
 
 
 
-@app.get("/
-@app.post("/
-def generate_voice(message: VoiceMessage = None):
+@app.get("/generate_voice_eleven", response_class=StreamingResponse)
+@app.post("/generate_voice_eleven", response_class=StreamingResponse)
+def generate_voice_eleven(message: VoiceMessage = None):
     global Last_message  # Declare Last_message as global
     if message is None:
         message = Last_message
@@ -184,25 +184,44 @@ def generate_voice(message: VoiceMessage = None):
             yield chunk
 
     return StreamingResponse(audio_stream(), media_type="audio/mpeg")
-
+@app.get("/generate_voice_coqui", response_class=StreamingResponse)
+@app.post("/generate_voice_coqui", response_class=StreamingResponse)
+def generate_voice_coqui(message: VoiceMessage = None):
+    global Last_message
+    if message is None:
+        message = Last_message
+    else:
+        Last_message = message
+
+    def audio_stream():
+        voice = determine_vocie_from_npc(message.npc, message.genre)
+        result = predict(
+            prompt=message.input,
+            language=message.language,
+            audio_file_pth=voice,
+            mic_file_path=None,
+            use_mic=False,
+            voice_cleanup=False,
+            no_lang_auto_detect=False,
+            agree=True,
+        )
+        # Stream the audio chunks yielded by the Coqui predict() call
+        for chunk in result:
+            print("received: ", chunk)
+            yield chunk
+
+    return StreamingResponse(audio_stream(), media_type="audio/mpeg")
 @app.get("/generate_song")
 async def generate_song(text: str):
-
-
-        "prompt": f"{text}",
-        "make_instrumental": False,
-        "wait_audio": False
+    song_lyrics = generate_lyrics({
+        "prompt": f"{text}",
     })
-
-
-
-
-
-
-
-
-
-        # sleep 5s
-        time.sleep(5)
-    except:
-        print("Error")
+    data = custom_generate_audio({
+        "prompt": song_lyrics['text'],
+        "tags": "male bard",
+        "title": "Everchangin_Quest_song",
+        "wait_audio": True,
+
+    })
+    infos = get_audio_information(f"{data[0]['id']},{data[1]['id']}")
+    return infos
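
Note: both new voice routes return a StreamingResponse with media_type="audio/mpeg", so callers should read the body incrementally instead of waiting for a complete response. A minimal client-side sketch with requests (the base URL, payload values, output path, and chunk size are assumptions; the VoiceMessage fields input, npc, genre, and language are the ones referenced in the diff):

import requests

payload = {
    "input": "Greetings, traveller!",  # text to speak (example value)
    "npc": "bard",                     # fed to determine_vocie_from_npc
    "genre": "fantasy",
    "language": "en",
}

# Stream the MP3 bytes to disk as they arrive from /generate_voice_coqui.
with requests.post("http://localhost:8000/generate_voice_coqui",
                   json=payload, stream=True, timeout=300) as resp:
    resp.raise_for_status()
    with open("npc_line.mp3", "wb") as out:
        for chunk in resp.iter_content(chunk_size=4096):
            if chunk:
                out.write(chunk)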
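
The reworked /generate_song endpoint now generates lyrics first, passes them to custom_generate_audio with wait_audio=True, and returns the result of get_audio_information for the two generated ids, so a plain GET is enough on the client side. A sketch under the same local-server assumption, assuming wait_audio=True makes the helper block until the audio is ready and that the returned metadata is JSON-serialisable:

import requests

resp = requests.get(
    "http://localhost:8000/generate_song",
    params={"text": "a ballad about an ever-changing quest"},
    timeout=600,  # generation blocks server-side until the tracks are rendered
)
resp.raise_for_status()
print(resp.json())  # metadata for the two generated clips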