first:
- .env.example +1 -0
- .gitignore +2 -0
- app.py +171 -0
- openai_transcription_settings.json +18 -0
- requirements.txt +6 -0
.env.example
ADDED
@@ -0,0 +1 @@
OPENAI_API_KEY=api_key
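app.py loads this file at startup via python-dotenv's load_dotenv(). Copy it to .env and replace the api_key placeholder with a real OpenAI API key before running.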
.gitignore
ADDED
@@ -0,0 +1,2 @@
.gradio
.env
app.py
ADDED
@@ -0,0 +1,171 @@
import gradio as gr
import asyncio
from websockets import connect, Data, ClientConnection
from dotenv import load_dotenv
import json
import os
import threading
import numpy as np
import base64
import soundfile as sf
import io
from pydub import AudioSegment
import time
import uuid

class LogColors:
    OK = '\033[94m'
    SUCCESS = '\033[92m'
    WARNING = '\033[93m'
    ERROR = '\033[91m'
    ENDC = '\033[0m'

load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY environment variable must be set")

WEBSOCKET_URI = "wss://api.openai.com/v1/realtime?intent=transcription"
WEBSOCKET_HEADERS = {
    "Authorization": "Bearer " + OPENAI_API_KEY,
    "OpenAI-Beta": "realtime=v1"
}

transcription = ""
css = """
"""

connections = {}

class WebSocketClient:
    def __init__(self, uri: str, headers: dict, client_id: str):
        self.uri = uri
        self.headers = headers
        self.websocket: ClientConnection = None
        self.queue = asyncio.Queue(maxsize=10)
        self.loop = None
        self.client_id = client_id

    async def connect(self):
        try:
            self.websocket = await connect(self.uri, additional_headers=self.headers)
            print(f"{LogColors.SUCCESS}Connected to OpenAI WebSocket{LogColors.ENDC}\n")

            # Send session settings to OpenAI
            with open("openai_transcription_settings.json", "r") as f:
                settings = f.read()
            await self.websocket.send(settings)

            await asyncio.gather(self.receive_messages(), self.send_audio_chunks())
        except Exception as e:
            print(f"{LogColors.ERROR}WebSocket Connection Error: {e}{LogColors.ENDC}")

    def run(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self.connect())

    def process_websocket_message(self, message: Data):
        global transcription
        message_object = json.loads(message)
        if message_object["type"] != "error":
            print(f"{LogColors.OK}Received message: {LogColors.ENDC} {message}")
            if message_object["type"] == "conversation.item.input_audio_transcription.delta":
                delta = message_object["delta"]
                transcription += delta
            elif message_object["type"] == "conversation.item.input_audio_transcription.completed":
                # Separate completed segments with a single space
                transcription += ' ' if len(transcription) and transcription[-1] != ' ' else ''
        else:
            print(f"{LogColors.ERROR}Error: {message}{LogColors.ENDC}")

    async def send_audio_chunks(self):
        while True:
            audio_data = await self.queue.get()
            sample_rate, audio_array = audio_data
            if self.websocket:
                # Convert to mono if stereo
                if audio_array.ndim > 1:
                    audio_array = audio_array.mean(axis=1)

                # Convert to float32 and normalize
                audio_array = audio_array.astype(np.float32)
                audio_array /= np.max(np.abs(audio_array)) if np.max(np.abs(audio_array)) > 0 else 1.0

                # Convert to 16-bit PCM
                audio_array_int16 = (audio_array * 32767).astype(np.int16)

                audio_buffer = io.BytesIO()
                sf.write(audio_buffer, audio_array_int16, sample_rate, format='WAV', subtype='PCM_16')
                audio_buffer.seek(0)
                audio_segment = AudioSegment.from_file(audio_buffer, format="wav")
                # Resample to 24 kHz to match the session's expected input rate
                resampled_audio = audio_segment.set_frame_rate(24000)

                output_buffer = io.BytesIO()
                resampled_audio.export(output_buffer, format="wav")
                output_buffer.seek(0)
                base64_audio = base64.b64encode(output_buffer.read()).decode("utf-8")

                await self.websocket.send(json.dumps({"type": "input_audio_buffer.append", "audio": base64_audio}))
                print(f"{LogColors.OK}Sent audio chunk{LogColors.ENDC}")

    async def receive_messages(self):
        async for message in self.websocket:
            self.process_websocket_message(message)

    def enqueue_audio_chunk(self, sample_rate: int, chunk_array: np.ndarray):
        if not self.queue.full():
            asyncio.run_coroutine_threadsafe(self.queue.put((sample_rate, chunk_array)), self.loop)
        else:
            print(f"{LogColors.WARNING}Queue is full, dropping audio chunk{LogColors.ENDC}")

    async def close(self):
        if self.websocket:
            await self.websocket.close()
            connections.pop(self.client_id, None)
            print(f"{LogColors.WARNING}WebSocket connection closed{LogColors.ENDC}")


def send_audio_chunk(new_chunk: gr.Audio, client_id: str):
    if client_id not in connections:
        return "Connection is being established, please try again in a few seconds."
    sr, y = new_chunk
    connections[client_id].enqueue_audio_chunk(sr, y)
    return transcription

def clear_transcription():
    # Reset the shared transcript; clearing only the textbox would let
    # the old text reappear on the next streaming update
    global transcription
    transcription = ""
    return ""

def create_new_websocket_connection():
    client_id = str(uuid.uuid4())
    connections[client_id] = WebSocketClient(WEBSOCKET_URI, WEBSOCKET_HEADERS, client_id)
    threading.Thread(target=connections[client_id].run, daemon=True).start()
    return client_id

if __name__ == "__main__":
    with gr.Blocks(css=css) as demo:
        gr.Markdown("# Realtime transcription demo")
        with gr.Row():
            with gr.Column():
                output_textbox = gr.Textbox(label="Transcription", value="", lines=7, interactive=False, autoscroll=True)
        with gr.Row():
            with gr.Column(scale=5):
                audio_input = gr.Audio(streaming=True, format="wav")
            with gr.Column():
                clear_button = gr.Button("Clear")

        client_id = gr.State()
        clear_button.click(clear_transcription, outputs=[output_textbox])
        audio_input.stream(send_audio_chunk, [audio_input, client_id], [output_textbox], stream_every=0.5, concurrency_limit=None)
        demo.load(create_new_websocket_connection, outputs=[client_id])

    # Launch Gradio in a background thread: pass the callable, not the result of calling it
    threading.Thread(target=demo.launch, kwargs={"share": True}, daemon=True).start()

    while True:
        time.sleep(1)
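The audio path inside send_audio_chunks (mono mixdown, normalization, 16-bit PCM, 24 kHz resample, base64) can be exercised without a websocket. Below is a minimal sketch, assuming the same numpy/soundfile/pydub stack as app.py; encode_chunk is a hypothetical helper introduced here for illustration, not part of the app:

# encode_chunk.py - hypothetical standalone sketch of app.py's audio encoding
import base64
import io

import numpy as np
import soundfile as sf
from pydub import AudioSegment

def encode_chunk(sample_rate: int, audio_array: np.ndarray) -> str:
    """Mirror send_audio_chunks: mono float32, normalize, PCM16 WAV, 24 kHz, base64."""
    if audio_array.ndim > 1:  # stereo -> mono
        audio_array = audio_array.mean(axis=1)
    audio_array = audio_array.astype(np.float32)
    peak = np.max(np.abs(audio_array))
    audio_array /= peak if peak > 0 else 1.0
    pcm16 = (audio_array * 32767).astype(np.int16)

    buf = io.BytesIO()
    sf.write(buf, pcm16, sample_rate, format="WAV", subtype="PCM_16")
    buf.seek(0)
    resampled = AudioSegment.from_file(buf, format="wav").set_frame_rate(24000)

    out = io.BytesIO()
    resampled.export(out, format="wav")
    return base64.b64encode(out.getvalue()).decode("utf-8")

if __name__ == "__main__":
    # Half a second of a 440 Hz sine at 48 kHz, like one streamed Gradio chunk
    t = np.linspace(0, 0.5, 24000, endpoint=False)
    chunk = (0.5 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)
    print(len(encode_chunk(48000, chunk)))  # length of the base64 payload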
openai_transcription_settings.json
ADDED
@@ -0,0 +1,18 @@
{
    "type": "transcription_session.update",
    "session": {
        "input_audio_format": "pcm16",
        "input_audio_transcription": {
            "model": "gpt-4o-transcribe",
            "prompt": "",
            "language": "en"
        },
        "turn_detection": {
            "type": "semantic_vad",
            "eagerness": "high"
        },
        "input_audio_noise_reduction": {
            "type": "near_field"
        }
    }
}
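This file is sent verbatim as the first websocket message (see WebSocketClient.connect), so any field can be edited here without touching app.py. To override fields programmatically instead, a minimal sketch; load_settings is a hypothetical helper, not part of the app:

import json

def load_settings(path: str = "openai_transcription_settings.json", language: str | None = None) -> str:
    # Hypothetical helper: read the session settings, optionally overriding the language
    with open(path, "r") as f:
        settings = json.load(f)
    if language is not None:
        settings["session"]["input_audio_transcription"]["language"] = language
    return json.dumps(settings)

# e.g. await websocket.send(load_settings(language="de"))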
requirements.txt
ADDED
@@ -0,0 +1,6 @@
gradio
websockets
python-dotenv
numpy
soundfile
pydub
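Setup, assuming a standard Python 3 environment: pip install -r requirements.txt, copy .env.example to .env with a real key, then python app.py. pydub may additionally need an ffmpeg binary on the PATH for some audio formats.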