import streamlit as st
from transformers import pipeline
import numpy as np
import io
import threading
from gradio_client import Client
from scipy.io import wavfile  # used to decode the recorded WAV bytes (assumes scipy is available)
from streamlit_audio_recorder import st_audiorec

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state["messages"] = []  # Store chat history

# Load the ASR model using the Hugging Face transformers pipeline
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
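
# Note: the Whisper model is loaded at module level, so it is re-created on every
# Streamlit rerun. A possible refinement (a sketch, not part of the original app)
# is to cache it once per process with st.cache_resource:
#
#   @st.cache_resource
#   def load_transcriber():
#       return pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
#
#   transcriber = load_transcriber()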

# Function to generate a response using the Gradio client
def generate_response(query):
    try:
        client = Client("Gopikanth123/llama2")
        result = client.predict(query=query, api_name="/predict")
        return result
    except Exception as e:
        return f"Error communicating with the Gradio backend: {e}"

# Function to handle user input and bot response
def handle_user_input(user_input):
    if user_input:
        # Add user message to session state
        st.session_state["messages"].append({"user": user_input})
        # Generate bot response
        response = generate_response(user_input)
        st.session_state["messages"].append({"bot": response})
        # Speak the bot response in a new thread to avoid blocking the UI
        threading.Thread(target=speak_text, args=(response,), daemon=True).start()

# Function to speak text (voice output)
def speak_text(text):
    import pyttsx3
    engine = pyttsx3.init()
    engine.stop()  # Ensure no previous loop is running
    engine.say(text)
    engine.runAndWait()
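
# Note: pyttsx3 synthesizes speech on the machine running this script, so the spoken
# output is only audible when the app is run locally, not in a user's browser when
# the app is hosted remotely.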

# Function to update chat history dynamically
def update_chat_history():
    chat_history = st.session_state["messages"]
    for msg in chat_history:
        if "user" in msg:
            st.markdown(f"<div class='chat-bubble user-message'><strong>You:</strong> {msg['user']}</div>", unsafe_allow_html=True)
        if "bot" in msg:
            st.markdown(f"<div class='chat-bubble bot-message'><strong>Bot:</strong> {msg['bot']}</div>", unsafe_allow_html=True)

# Function to process and transcribe audio
def transcribe_audio(audio_data, sr):
    # Normalize audio to float32 in [-1, 1]
    audio_data = audio_data.astype(np.float32)
    peak = np.max(np.abs(audio_data))
    if peak > 0:  # guard against silent input (avoids division by zero)
        audio_data /= peak
    # Use the ASR model to transcribe the audio
    transcription = transcriber({"sampling_rate": sr, "raw": audio_data})["text"]
    return transcription
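
# The ASR pipeline receives mono float32 samples normalized to [-1, 1] together with
# their sampling rate; stereo recordings are downmixed to mono before reaching this
# helper (see the audio handling below).
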
# Main Streamlit app
st.set_page_config(page_title="Llama2 Chatbot", page_icon="🤖", layout="wide")

st.markdown(
    """
    <style>
        .stButton>button {
            background-color: #6C63FF;
            color: white;
            font-size: 16px;
            border-radius: 10px;
            padding: 10px 20px;
        }
        .stTextInput>div>input {
            border: 2px solid #6C63FF;
            border-radius: 10px;
            padding: 10px;
        }
        .chat-container {
            background-color: #F7F9FC;
            padding: 20px;
            border-radius: 15px;
            max-height: 400px;
            overflow-y: auto;
        }
        .chat-bubble {
            padding: 10px 15px;
            border-radius: 15px;
            margin: 5px 0;
            max-width: 80%;
            display: inline-block;
        }
        .user-message {
            background-color: #D1C4E9;
            text-align: left;
            margin-left: auto;
        }
        .bot-message {
            background-color: #BBDEFB;
            text-align: left;
            margin-right: auto;
        }
        .input-container {
            display: flex;
            justify-content: space-between;
            gap: 10px;
            padding: 10px 0;
        }
    </style>
    """,
    unsafe_allow_html=True
)

st.title("🤖 Chat with Llama2 Bot")
st.markdown(
    """
    Welcome to the *Llama2 Chatbot*!
    - *Type* your message below, or
    - *Use the microphone* to speak to the bot.
    """
)

# Display chat history
chat_history_container = st.container()

with chat_history_container:
    # Add input field within a form
    with st.form(key='input_form', clear_on_submit=True):
        user_input = st.text_input("Type your message here...", placeholder="Hello, how are you?")
        submit_button = st.form_submit_button("Send")

    # Handle form submission
    if submit_button:
        handle_user_input(user_input)

# Speech input: the audio recorder widget is rendered directly, outside the form.
# Note: st_audiorec() is assumed to return the recording as WAV bytes, or None
# until something has been recorded; gating it behind a button would hide the
# recorder again on the next rerun.
wav_audio_data = st_audiorec()
if wav_audio_data is not None and wav_audio_data != st.session_state.get("last_audio"):
    # Remember the last recording so the same clip is not re-transcribed on every rerun
    st.session_state["last_audio"] = wav_audio_data
    st.audio(wav_audio_data, format="audio/wav")
    # Decode the WAV bytes into a sample rate and a numpy array of samples
    sr, audio_np = wavfile.read(io.BytesIO(wav_audio_data))
    if audio_np.ndim > 1:
        audio_np = audio_np.mean(axis=1)  # downmix stereo to mono
    # Transcribe the audio and send the recognized text to the bot
    transcription = transcribe_audio(audio_np, sr)
    st.success(f"Recognized Text: {transcription}")
    handle_user_input(transcription)

st.markdown("### Chat History")
# Update chat history on every interaction
update_chat_history()