import streamlit as st
import pyttsx3
import threading
import base64
import io
import speech_recognition as sr
from gradio_client import Client
import streamlit.components.v1 as components
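# Streamlit front-end for a Llama2 chatbot: the user types or records a
# message, the reply is fetched from a hosted Gradio Space, and the answer
# is spoken aloud with pyttsx3.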
# Initialize session state
if "messages" not in st.session_state:
st.session_state["messages"] = [] # Store chat history
# Function to generate a response using Gradio client
def generate_response(query):
    try:
        client = Client("Gopikanth123/llama2")
        result = client.predict(query=query, api_name="/predict")
        return result
    except Exception as e:
        return f"Error communicating with the Gradio backend: {e}"
# Function to handle user input and bot response
def handle_user_input(user_input):
    if user_input:
        # Add user message to session state
        st.session_state["messages"].append({"user": user_input})
        # Generate bot response
        response = generate_response(user_input)
        st.session_state["messages"].append({"bot": response})
        # Speak out bot response in a new thread to avoid blocking
        threading.Thread(target=speak_text, args=(response,), daemon=True).start()
# Function to speak text (Voice Output)
def speak_text(text):
    engine = pyttsx3.init()
    engine.stop()  # Ensure no previous loop is running
    engine.say(text)
    engine.runAndWait()
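# Note: pyttsx3 synthesizes speech on the machine running Streamlit, so in a
# hosted deployment the audio plays on the server, not in the user's browser.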
# Function to update chat history dynamically
def update_chat_history():
    chat_history = st.session_state["messages"]
    for msg in chat_history:
        if "user" in msg:
            st.markdown(f"<div class='chat-bubble user-message'><strong>You:</strong> {msg['user']}</div>", unsafe_allow_html=True)
        if "bot" in msg:
            st.markdown(f"<div class='chat-bubble bot-message'><strong>Bot:</strong> {msg['bot']}</div>", unsafe_allow_html=True)
# Function to recognize speech from audio received as bytes
def recognize_speech_from_audio(audio_bytes):
    st.info("Processing audio...")
    # Convert byte stream to audio file
    audio_data = io.BytesIO(audio_bytes)
    recognizer = sr.Recognizer()
    # Recognize speech from the audio data
    with sr.AudioFile(audio_data) as source:
        audio = recognizer.record(source)
    try:
        recognized_text = recognizer.recognize_google(audio)
        st.session_state["user_input"] = recognized_text
        st.success(f"Recognized Text: {recognized_text}")
        handle_user_input(recognized_text)
    except sr.UnknownValueError:
        st.error("Sorry, I couldn't understand the audio.")
    except sr.RequestError:
        st.error("Could not request results; please check your internet connection.")
# JavaScript for audio recording and sending data to Streamlit
audio_recorder_html = """
<script>
let audioChunks = [];
let mediaRecorder;

function startRecording() {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(function(stream) {
            audioChunks = [];  // Discard any previous recording
            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.ondataavailable = function(event) {
                audioChunks.push(event.data);
            };
            mediaRecorder.onstop = function() {
                // Note: MediaRecorder typically produces WebM/Ogg; labelling
                // the blob 'audio/wav' does not transcode the data.
                const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
                const reader = new FileReader();
                reader.onloadend = function() {
                    const audioBase64 = reader.result.split(',')[1];
                    window.parent.postMessage({ 'type': 'audio_data', 'audio': audioBase64 }, '*');
                };
                reader.readAsDataURL(audioBlob);
            };
            mediaRecorder.start();
        });
}

function stopRecording() {
    mediaRecorder.stop();
}

function handleStartStop() {
    if (mediaRecorder && mediaRecorder.state === "recording") {
        stopRecording();
    } else {
        startRecording();
    }
}
</script>
<button onclick="handleStartStop()">Start/Stop Recording</button>
<p>Click the button to start/stop audio recording.</p>
"""
# Main Streamlit app
st.set_page_config(page_title="Llama2 Chatbot", page_icon="πŸ€–", layout="wide")
st.markdown(
    """
    <style>
    .stButton>button {
        background-color: #6C63FF;
        color: white;
        font-size: 16px;
        border-radius: 10px;
        padding: 10px 20px;
    }
    .stTextInput>div>input {
        border: 2px solid #6C63FF;
        border-radius: 10px;
        padding: 10px;
    }
    .chat-container {
        background-color: #F7F9FC;
        padding: 20px;
        border-radius: 15px;
        max-height: 400px;
        overflow-y: auto;
    }
    .chat-bubble {
        padding: 10px 15px;
        border-radius: 15px;
        margin: 5px 0;
        max-width: 80%;
        display: inline-block;
    }
    .user-message {
        background-color: #D1C4E9;
        text-align: left;
        margin-left: auto;
    }
    .bot-message {
        background-color: #BBDEFB;
        text-align: left;
        margin-right: auto;
    }
    .input-container {
        display: flex;
        justify-content: space-between;
        gap: 10px;
        padding: 10px 0;
    }
    </style>
    """,
    unsafe_allow_html=True
)
st.title("πŸ€– Chat with Llama2 Bot")
st.markdown(
    """
    Welcome to the *Llama2 Chatbot*!
    - *Type* your message below, or
    - *Speak* to the bot using your microphone.
    """
)
# Display chat history
chat_history_container = st.container()
with chat_history_container:
    # Add input field within a form
    with st.form(key='input_form', clear_on_submit=True):
        user_input = st.text_input("Type your message here...", placeholder="Hello, how are you?")
        submit_button = st.form_submit_button("Send")

    # Handle form submission
    if submit_button:
        handle_user_input(user_input)
# Display JavaScript for audio recording
components.html(audio_recorder_html, height=300)
# Update chat history on every interaction
update_chat_history()
# Listening to the audio data sent by JavaScript
def process_audio_data():
    # st.experimental_get_query_params is deprecated in newer Streamlit
    # releases in favour of st.query_params, but is kept here as written.
    audio_data = st.experimental_get_query_params().get('audio', [None])[0]
    if audio_data:
        # The JavaScript sends base64 text; decode to raw bytes before recognition
        recognize_speech_from_audio(base64.b64decode(audio_data))

# Call the function to process audio if available
process_audio_data()