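# LOR Technologies AI Assistant: a Streamlit chat UI backed by the OpenAI
# Assistants API, with per-user chat history persisted in Firestore and
# optional text-to-speech playback of replies.
#
# Assumed entry point (on a Hugging Face Space this file is typically app.py):
#   streamlit run app.py
# It expects the openai_key and assistant_id environment variables to be set
# and firebase-service-account.json to sit next to the script.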
import streamlit as st
from openai import OpenAI
import time
import os
import uuid
import firebase_admin
from firebase_admin import credentials, firestore
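# These imports correspond to: pip install streamlit openai firebase-admin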
# Firebase setup
if not firebase_admin._apps:
    cred = credentials.Certificate("firebase-service-account.json")
    firebase_admin.initialize_app(cred)
db = firestore.client()
# OpenAI setup
openai_key = os.getenv("openai_key")
assistant_id = os.getenv("assistant_id")
client = OpenAI(api_key=openai_key)
# Streamlit Config
st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")
# 🎯 Session + User ID
if "user_id" not in st.session_state:
    st.session_state["user_id"] = str(uuid.uuid4())
user_id = st.session_state["user_id"]
# 🖼️ LORTech Branding + Styling
st.markdown("""
<style>
.block-container {padding-top: 1rem; padding-bottom: 0rem;}
header {visibility: hidden;}
.stChatMessage { max-width: 85%; border-radius: 12px; padding: 8px; margin-bottom: 10px; }
.stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
.stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
.lor-logo { vertical-align: middle; }  /* matches the class used on the <img> below */
</style>
""", unsafe_allow_html=True)
| st.markdown(""" | |
| <div style='text-align: center; margin-top: 20px; margin-bottom: -10px;'> | |
| <span style='display: inline-flex; align-items: center; gap: 8px;'> | |
| <img src='https://lortechnologies.com/wp-content/uploads/2023/03/LOR-Online-Logo.svg' width='100' class='lor-logo'/> | |
| <span style='font-size: 12px; color: gray;'>Powered by LOR Technologies</span> | |
| </span> | |
| </div> | |
| """, unsafe_allow_html=True) | |
# Get or create a thread ID
def get_or_create_thread_id():
    doc_ref = db.collection("users").document(user_id)
    doc = doc_ref.get()
    if doc.exists:
        return doc.to_dict()["thread_id"]
    else:
        thread = client.beta.threads.create()
        doc_ref.set({"thread_id": thread.id, "created_at": firestore.SERVER_TIMESTAMP})
        return thread.id
# 💾 Save a message
def save_message(role, content):
    db.collection("users").document(user_id).collection("messages").add({
        "role": role,
        "content": content,
        "timestamp": firestore.SERVER_TIMESTAMP
    })
# 💬 Display chat history
def display_chat_history():
    messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
    # Use a Hub "resolve" URL: a "blob" URL returns an HTML page, not the raw image an <img> tag needs.
    assistant_icon_html = "<img src='https://huggingface.co/spaces/IAMTFRMZA/lortechassistant/resolve/main/lorain.jpg' width='20' style='vertical-align:middle;'/>"
    # The query is ascending by timestamp; reversing it renders the newest message first.
    for msg in list(messages)[::-1]:
        data = msg.to_dict()
        if data["role"] == "user":
            st.markdown(f"<div class='stChatMessage' data-testid='stChatMessage-user'>👤 <strong>You:</strong> {data['content']}</div>", unsafe_allow_html=True)
        else:
            st.markdown(f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>", unsafe_allow_html=True)
# Main Chat UI
input_col, clear_col = st.columns([9, 1])
with input_col:
    user_input = st.chat_input("Type your message here...")
with clear_col:
    if st.button("🗑️", key="clear-chat", help="Clear Chat"):
        try:
            user_doc_ref = db.collection("users").document(user_id)
            for msg in user_doc_ref.collection("messages").stream():
                msg.reference.delete()
            user_doc_ref.delete()
            st.session_state.clear()
            st.rerun()
        except Exception as e:
            st.error(f"Failed to clear chat: {e}")
thread_id = get_or_create_thread_id()
display_chat_history()

if "mute_voice" not in st.session_state:
    st.session_state["mute_voice"] = False
def synthesize_and_play(text, mute):
    if not mute:
        # Only call OpenAI TTS if unmuted
        with st.spinner("Synthesizing voice..."):
            speech_response = client.audio.speech.create(
                model="tts-1",  # 'tts-1' or 'tts-1-hd'
                voice="nova",   # or "alloy", "echo", "fable", etc.
                input=text,
                response_format="mp3"
            )
            audio_path = f"output_{user_id}.mp3"
            with open(audio_path, "wb") as f:
                f.write(speech_response.content)
            st.audio(audio_path, format="audio/mp3", autoplay=True)
            # Optionally remove the file after play to save disk
            # os.remove(audio_path)
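# Optional variant, not called anywhere in this app: a minimal sketch that keeps
# the synthesized audio in memory instead of writing output_<user_id>.mp3 to
# disk. st.audio also accepts raw bytes, so the temp file can be skipped. The
# name synthesize_and_play_in_memory is illustrative, not part of the original.
def synthesize_and_play_in_memory(text):
    speech_response = client.audio.speech.create(
        model="tts-1",
        voice="nova",
        input=text,
        response_format="mp3"
    )
    # speech_response.content holds the raw MP3 bytes
    st.audio(speech_response.content, format="audio/mp3", autoplay=True)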
if user_input:
    # Send user message to OpenAI thread
    client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
    save_message("user", user_input)
    with st.spinner("Thinking and typing..."):
        run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
        # Poll until the run reaches a terminal state; bail out on failure
        # rather than looping forever.
        while True:
            run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
            if run_status.status == "completed":
                break
            if run_status.status in ("failed", "cancelled", "expired"):
                st.error(f"Assistant run ended with status: {run_status.status}")
                st.stop()
            time.sleep(1)
    messages_response = client.beta.threads.messages.list(thread_id=thread_id)
    latest_response = sorted(messages_response.data, key=lambda x: x.created_at)[-1]
    assistant_message = latest_response.content[0].text.value
    save_message("assistant", assistant_message)
    # 🔊 Voice controls (auto-speak enabled, can mute)
    col1, col2 = st.columns([1, 1])
    with col1:
        if st.button("🔊 Play Voice", key="unmute"):
            st.session_state["mute_voice"] = False
            synthesize_and_play(assistant_message, False)
    with col2:
        if st.button("🔇 Mute Voice", key="mute"):
            st.session_state["mute_voice"] = True
            st.info("Voice output muted for this and future messages.")
    # Play voice unless muted
    synthesize_and_play(assistant_message, st.session_state["mute_voice"])
    # Force Streamlit to rerun so chat refreshes and you get a new prompt
    time.sleep(0.5)
    st.rerun()