import streamlit as st
import pyttsx3
import threading
import io
import base64
import speech_recognition as sr
from gradio_client import Client
import streamlit.components.v1 as components

# Initialize session state
if "messages" not in st.session_state:
    st.session_state["messages"] = []  # Store chat history


# Function to generate a response using the Gradio client
def generate_response(query):
    try:
        client = Client("Gopikanth123/llama2")
        result = client.predict(query=query, api_name="/predict")
        return result
    except Exception as e:
        return f"Error communicating with the Gradio backend: {e}"


# Function to handle user input and the bot response
def handle_user_input(user_input):
    if user_input:
        # Add the user message to the chat history
        st.session_state["messages"].append({"user": user_input})
        # Generate the bot response
        response = generate_response(user_input)
        st.session_state["messages"].append({"bot": response})
        # Speak the bot response in a new thread to avoid blocking the UI
        threading.Thread(target=speak_text, args=(response,), daemon=True).start()


# Function to speak text (voice output)
def speak_text(text):
    engine = pyttsx3.init()
    engine.stop()  # Ensure no previous loop is still running
    engine.say(text)
    engine.runAndWait()


# Function to update the chat history display
def update_chat_history():
    chat_history = st.session_state["messages"]
    for msg in chat_history:
        if "user" in msg:
            st.markdown(f"<div><b>You:</b> {msg['user']}</div>", unsafe_allow_html=True)
        if "bot" in msg:
            st.markdown(f"<div><b>Bot:</b> {msg['bot']}</div>", unsafe_allow_html=True)


# Function to recognize speech from audio received as bytes
def recognize_speech_from_audio(audio_bytes):
    st.info("Processing audio...")
    # Wrap the byte stream in a file-like object (AudioFile expects WAV/AIFF/FLAC data)
    audio_data = io.BytesIO(audio_bytes)
    recognizer = sr.Recognizer()
    # Read the speech from the audio data
    with sr.AudioFile(audio_data) as source:
        audio = recognizer.record(source)
    try:
        recognized_text = recognizer.recognize_google(audio)
        st.session_state["user_input"] = recognized_text
        st.success(f"Recognized Text: {recognized_text}")
        handle_user_input(recognized_text)
    except sr.UnknownValueError:
        st.error("Sorry, I couldn't understand the audio.")
    except sr.RequestError:
        st.error("Could not request results; please check your internet connection.")


# HTML/JavaScript for audio recording and sending data to Streamlit
audio_recorder_html = """

<p id="status">Click the button to start/stop audio recording.</p>
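<!--
  Minimal recording sketch (an illustrative reconstruction, not the original
  implementation): capture microphone audio with MediaRecorder and hand the
  base64-encoded clip back to the Streamlit page via an "audio" query
  parameter, which is what process_audio_data() below reads. The element ids,
  button label, and the query-parameter hand-off are assumptions; this only
  suits very short clips.
-->
<button id="record-btn">🎤 Start / Stop Recording</button>
<script>
    let mediaRecorder = null;
    let chunks = [];

    document.getElementById("record-btn").onclick = async function () {
        const statusEl = document.getElementById("status");
        // A second click while recording stops the recorder
        if (mediaRecorder && mediaRecorder.state === "recording") {
            mediaRecorder.stop();
            statusEl.textContent = "Recording stopped. Sending audio...";
            return;
        }
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        mediaRecorder = new MediaRecorder(stream);
        chunks = [];
        mediaRecorder.ondataavailable = (e) => chunks.push(e.data);
        mediaRecorder.onstop = () => {
            const blob = new Blob(chunks, { type: "audio/webm" });
            const reader = new FileReader();
            reader.onloadend = () => {
                // Reload the parent page with the base64 audio as a query parameter
                const base64Audio = reader.result.split(",")[1];
                const url = new URL(window.parent.location.href);
                url.searchParams.set("audio", base64Audio);
                window.parent.location.href = url.toString();
            };
            reader.readAsDataURL(blob);
        };
        mediaRecorder.start();
        statusEl.textContent = "Recording... click again to stop.";
    };
</script>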
"""

# Main Streamlit app
st.set_page_config(page_title="Llama2 Chatbot", page_icon="🤖", layout="wide")

# Custom page styling (CSS rules can go inside this block)
st.markdown(
    """
    """,
    unsafe_allow_html=True
)

st.title("🤖 Chat with Llama2 Bot")
st.markdown(
    """
    Welcome to the *Llama2 Chatbot*!
    - *Type* your message below, or
    - *Speak* to the bot using your microphone.
    """
)

# Display chat history
chat_history_container = st.container()
with chat_history_container:
    # Add the input field within a form
    with st.form(key='input_form', clear_on_submit=True):
        user_input = st.text_input("Type your message here...", placeholder="Hello, how are you?")
        submit_button = st.form_submit_button("Send")

# Handle form submission
if submit_button:
    handle_user_input(user_input)

# Render the in-browser audio recorder
components.html(audio_recorder_html, height=300)

# Update chat history on every interaction
update_chat_history()


# Listen for audio data sent back by the JavaScript recorder
def process_audio_data():
    audio_data = st.experimental_get_query_params().get('audio', [None])[0]
    if audio_data:
        # The query parameter carries text; the recorder sketch sends it base64-encoded,
        # so decode it back to bytes before recognition (AudioFile expects WAV/AIFF/FLAC).
        recognize_speech_from_audio(base64.b64decode(audio_data))


# Call the function to process audio if available
process_audio_data()
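# To try the app locally (the filename app.py is an assumed example):
#   pip install streamlit pyttsx3 SpeechRecognition gradio_client
#   streamlit run app.py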