Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-import streamlit as st
+import streamlit as st
 from openai import OpenAI
 import time
 import os
@@ -97,6 +97,26 @@ with clear_col:
 thread_id = get_or_create_thread_id()
 display_chat_history()
 
+if "mute_voice" not in st.session_state:
+    st.session_state["mute_voice"] = False
+
+def synthesize_and_play(text, mute):
+    if not mute:
+        # Only call OpenAI TTS if unmuted
+        with st.spinner("Synthesizing voice with GPT-4o..."):
+            speech_response = client.audio.speech.create(
+                model="tts-1",   # 'tts-1' or 'tts-1-hd'
+                voice="nova",    # or "alloy", "echo", "fable", etc.
+                input=text,
+                response_format="mp3"
+            )
+        audio_path = f"output_{user_id}.mp3"
+        with open(audio_path, "wb") as f:
+            f.write(speech_response.content)
+        st.audio(audio_path, format="audio/mp3", autoplay=True)
+        # Optionally remove the file after play to save disk
+        # os.remove(audio_path)
+
 if user_input:
     # Send user message to OpenAI thread
     client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
@@ -115,48 +135,20 @@ if user_input:
     assistant_message = latest_response.content[0].text.value
     save_message("assistant", assistant_message)
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    function getFemaleVoice() {{
-        voices = synth.getVoices();
-        let female = voices.find(v => (v.name && v.name.toLowerCase().includes("female")) || (v.lang && v.lang.startsWith("en") && v.gender !== "male"));
-        // Fallback: any en female, then any en, then first
-        return female || voices.find(v => v.lang && v.lang.startsWith('en') && v.name && v.name.toLowerCase().includes('female')) || voices.find(v => v.lang && v.lang.startsWith('en')) || voices[0];
-    }}
-    function speakText() {{
-        if (isSpeaking) return;
-        utterance = new SpeechSynthesisUtterance(text);
-        utterance.voice = getFemaleVoice();
-        utterance.rate = 1;
-        utterance.pitch = 1.1;
-        synth.speak(utterance);
-        isSpeaking = true;
-        utterance.onend = function() {{ isSpeaking = false; }};
-    }}
-    document.getElementById('speak-btn').onclick = function() {{
-        speakText();
-    }};
-    document.getElementById('mute-btn').onclick = function() {{
-        synth.cancel();
-        isSpeaking = false;
-    }};
-    // Auto-speak on load
-    setTimeout(speakText, 500);
-    </script>
-    """, height=80)
-
+    # 🔊 Voice controls (auto-speak enabled, can mute)
+    col1, col2 = st.columns([1, 1])
+    with col1:
+        if st.button("🔊 Play Voice", key="unmute"):
+            st.session_state["mute_voice"] = False
+            synthesize_and_play(assistant_message, False)
+    with col2:
+        if st.button("🔇 Mute Voice", key="mute"):
+            st.session_state["mute_voice"] = True
+            st.info("Voice output muted for this and future messages.")
+
+    # Play voice unless muted
+    synthesize_and_play(assistant_message, st.session_state["mute_voice"])
+
+    # Force Streamlit to rerun so chat refreshes and you get a new prompt
     time.sleep(0.5)
     st.rerun()
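For reference, a minimal standalone sketch of the OpenAI text-to-speech call this commit switches to, kept separate from the Streamlit UI. The model and voice names are taken from the diff above; the OPENAI_API_KEY environment variable and the output filename are illustrative assumptions, not part of the app.

# Minimal sketch of the TTS request made inside synthesize_and_play() above.
# Assumes OPENAI_API_KEY is set in the environment; the filename is made up for this example.
import os
from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

speech_response = client.audio.speech.create(
    model="tts-1",        # "tts-1-hd" trades latency for quality
    voice="nova",         # other built-in voices: "alloy", "echo", "fable", ...
    input="Hello! This is a quick voice test.",
    response_format="mp3",
)

# The response body is the encoded audio; .content exposes the raw mp3 bytes,
# which the app writes to disk and then plays back with st.audio(autoplay=True).
with open("tts_test.mp3", "wb") as f:
    f.write(speech_response.content)

Note that st.audio also accepts raw bytes, so the temporary output_{user_id}.mp3 file could likely be skipped by passing speech_response.content directly; the file-based approach is shown here only to mirror the committed code.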