import gradio as gr
from supabase import create_client, Client
import os
import requests
import json
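
# NOTE: in a Hugging Face Space, third-party packages such as supabase (and
# requests, if it is not already present in the base image) need to be listed
# in the Space's requirements.txt so they are installed at build time; gradio
# itself is provided by the Space's SDK setting.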

# ======================================================================
# --- SETUP AND CONFIGURATION ---
# ======================================================================

# Retrieve Supabase credentials from environment variables (Hugging Face Space secrets).
# This is a secure way to store your API keys and other sensitive information.
try:
    supabase_url = os.environ.get("SUPABASE_URL")
    supabase_key = os.environ.get("SUPABASE_KEY")
    supabase: Client = create_client(supabase_url, supabase_key)
except Exception as e:
    print(f"Error initializing Supabase client: {e}")
    supabase = None

# Set the endpoint for your voice generation engine.
# Make sure to replace this with the actual URL from your RunPod instance.
CHATTERBOX_ENDPOINT = os.environ.get("CHATTERBOX_ENDPOINT")

# ======================================================================
# --- CORE LOGIC ---
# ======================================================================

def process_voice_command(audio):
    """
    This function handles the core logic of the voice assistant.
    It takes an audio file, transcribes it, gets a response from a language model,
    saves the interaction to Supabase, and generates an audio response.
    """
    # Placeholder for the actual transcription logic.
    # You will replace this with a call to your speech-to-text model.
    transcribed_text = "Placeholder: Your transcribed text will appear here."
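
    # A minimal sketch of what this step could look like, assuming you use the
    # transformers ASR pipeline with a Whisper checkpoint and configure the
    # gr.Audio input with type="filepath" so `audio` arrives as a file path
    # (both are assumptions, not part of the original code):
    #
    #     from transformers import pipeline
    #     asr = pipeline("automatic-speech-recognition", model="openai/whisper-small")
    #     transcribed_text = asr(audio)["text"]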

    # Placeholder for the response from the language model.
    # You will replace this with a call to your Gemma model.
    model_response = "Placeholder: This is Gemma's generated response."
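
    # A minimal sketch of the Gemma call, assuming the model is served through
    # huggingface_hub's InferenceClient with an HF_TOKEN secret and that
    # "google/gemma-2-2b-it" is the checkpoint you want (all of this is an
    # assumption, not part of the original code):
    #
    #     from huggingface_hub import InferenceClient
    #     llm = InferenceClient(model="google/gemma-2-2b-it", token=os.environ.get("HF_TOKEN"))
    #     chat = llm.chat_completion(messages=[{"role": "user", "content": transcribed_text}])
    #     model_response = chat.choices[0].message.content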

    # Save the chat to Supabase
    if supabase:
        try:
            # We are using a dummy table name 'chats'; make sure this matches your Supabase table.
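            # Assumed schema (hypothetical): a 'chats' table with text columns
            # 'user_input' and 'model_response', plus whatever defaults (id,
            # created_at) Supabase adds for you.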
            supabase.table("chats").insert({
                "user_input": transcribed_text,
                "model_response": model_response
            }).execute()
print("Chat successfully saved to Supabase! πΎ") | |
except Exception as e: | |
print(f"Error saving chat to Supabase: {e}") | |
else: | |
print("Supabase client not initialized. Skipping database save.") | |
# Placeholder for the voice generation logic using Chatterbox | |
# You will replace this with an actual API call to your Chatterbox endpoint. | |
if CHATTTERBOX_ENDPOINT: | |
try: | |
# Example API call structure | |
payload = {"text": model_response} | |
headers = {"Content-Type": "application/json"} | |
requests.post(CHATTTERBOX_ENDPOINT, data=json.dumps(payload), headers=headers) | |
print("Response sent to Chatterbox endpoint! ποΈ") | |
except Exception as e: | |
print(f"Error sending data to Chatterbox: {e}") | |
else: | |
print("Chatterbox endpoint not set. Skipping voice generation.") | |

    # Return the transcription and the response to the Gradio interface.
    return transcribed_text, model_response


# ======================================================================
# --- GRADIO INTERFACE ---
# ======================================================================

# Create the Gradio interface
iface = gr.Interface(
    fn=process_voice_command,
    inputs=gr.Audio(sources=["microphone"], label="Speak your command here..."),
    outputs=[
        gr.Textbox(label="Transcription"),
        gr.Textbox(label="AI's Response")
    ],
    title="My Personal Voice Assistant",
    description="Speak into the microphone and get a response from the AI, with chats saved to your Supabase database."
)
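
# If you adopt the filepath-based transcription sketch above, the Audio input would
# also need type="filepath" (an assumption, not required by the placeholder code):
#
#     inputs=gr.Audio(sources=["microphone"], type="filepath", label="Speak your command here...")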

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()